webscout-8.2.2-py3-none-any.whl → webscout-2026.1.19-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (483)
  1. webscout/AIauto.py +524 -143
  2. webscout/AIbase.py +247 -123
  3. webscout/AIutel.py +68 -132
  4. webscout/Bard.py +1072 -535
  5. webscout/Extra/GitToolkit/__init__.py +2 -2
  6. webscout/Extra/GitToolkit/gitapi/__init__.py +20 -12
  7. webscout/Extra/GitToolkit/gitapi/gist.py +142 -0
  8. webscout/Extra/GitToolkit/gitapi/organization.py +91 -0
  9. webscout/Extra/GitToolkit/gitapi/repository.py +308 -195
  10. webscout/Extra/GitToolkit/gitapi/search.py +162 -0
  11. webscout/Extra/GitToolkit/gitapi/trending.py +236 -0
  12. webscout/Extra/GitToolkit/gitapi/user.py +128 -96
  13. webscout/Extra/GitToolkit/gitapi/utils.py +82 -62
  14. webscout/Extra/YTToolkit/README.md +443 -0
  15. webscout/Extra/YTToolkit/YTdownloader.py +953 -957
  16. webscout/Extra/YTToolkit/__init__.py +3 -3
  17. webscout/Extra/YTToolkit/transcriber.py +595 -476
  18. webscout/Extra/YTToolkit/ytapi/README.md +230 -0
  19. webscout/Extra/YTToolkit/ytapi/__init__.py +22 -6
  20. webscout/Extra/YTToolkit/ytapi/captions.py +190 -0
  21. webscout/Extra/YTToolkit/ytapi/channel.py +302 -307
  22. webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
  23. webscout/Extra/YTToolkit/ytapi/extras.py +178 -45
  24. webscout/Extra/YTToolkit/ytapi/hashtag.py +120 -0
  25. webscout/Extra/YTToolkit/ytapi/https.py +89 -88
  26. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
  27. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -59
  28. webscout/Extra/YTToolkit/ytapi/pool.py +8 -8
  29. webscout/Extra/YTToolkit/ytapi/query.py +143 -40
  30. webscout/Extra/YTToolkit/ytapi/shorts.py +122 -0
  31. webscout/Extra/YTToolkit/ytapi/stream.py +68 -63
  32. webscout/Extra/YTToolkit/ytapi/suggestions.py +97 -0
  33. webscout/Extra/YTToolkit/ytapi/utils.py +66 -62
  34. webscout/Extra/YTToolkit/ytapi/video.py +189 -18
  35. webscout/Extra/__init__.py +2 -3
  36. webscout/Extra/gguf.py +1298 -682
  37. webscout/Extra/tempmail/README.md +488 -0
  38. webscout/Extra/tempmail/__init__.py +28 -28
  39. webscout/Extra/tempmail/async_utils.py +143 -141
  40. webscout/Extra/tempmail/base.py +172 -161
  41. webscout/Extra/tempmail/cli.py +191 -187
  42. webscout/Extra/tempmail/emailnator.py +88 -84
  43. webscout/Extra/tempmail/mail_tm.py +378 -361
  44. webscout/Extra/tempmail/temp_mail_io.py +304 -292
  45. webscout/Extra/weather.py +196 -194
  46. webscout/Extra/weather_ascii.py +17 -15
  47. webscout/Provider/AISEARCH/PERPLEXED_search.py +175 -0
  48. webscout/Provider/AISEARCH/Perplexity.py +237 -304
  49. webscout/Provider/AISEARCH/README.md +106 -0
  50. webscout/Provider/AISEARCH/__init__.py +16 -10
  51. webscout/Provider/AISEARCH/brave_search.py +298 -0
  52. webscout/Provider/AISEARCH/iask_search.py +130 -209
  53. webscout/Provider/AISEARCH/monica_search.py +200 -246
  54. webscout/Provider/AISEARCH/webpilotai_search.py +242 -281
  55. webscout/Provider/Algion.py +413 -0
  56. webscout/Provider/Andi.py +74 -69
  57. webscout/Provider/Apriel.py +313 -0
  58. webscout/Provider/Ayle.py +323 -0
  59. webscout/Provider/ChatSandbox.py +329 -0
  60. webscout/Provider/ClaudeOnline.py +365 -0
  61. webscout/Provider/Cohere.py +232 -208
  62. webscout/Provider/DeepAI.py +367 -0
  63. webscout/Provider/Deepinfra.py +343 -173
  64. webscout/Provider/EssentialAI.py +217 -0
  65. webscout/Provider/ExaAI.py +274 -261
  66. webscout/Provider/Gemini.py +60 -54
  67. webscout/Provider/GithubChat.py +385 -367
  68. webscout/Provider/Gradient.py +286 -0
  69. webscout/Provider/Groq.py +556 -670
  70. webscout/Provider/HadadXYZ.py +323 -0
  71. webscout/Provider/HeckAI.py +392 -233
  72. webscout/Provider/HuggingFace.py +387 -0
  73. webscout/Provider/IBM.py +340 -0
  74. webscout/Provider/Jadve.py +317 -266
  75. webscout/Provider/K2Think.py +306 -0
  76. webscout/Provider/Koboldai.py +221 -381
  77. webscout/Provider/Netwrck.py +273 -228
  78. webscout/Provider/Nvidia.py +310 -0
  79. webscout/Provider/OPENAI/DeepAI.py +489 -0
  80. webscout/Provider/OPENAI/K2Think.py +423 -0
  81. webscout/Provider/OPENAI/PI.py +463 -0
  82. webscout/Provider/OPENAI/README.md +890 -0
  83. webscout/Provider/OPENAI/TogetherAI.py +405 -0
  84. webscout/Provider/OPENAI/TwoAI.py +255 -0
  85. webscout/Provider/OPENAI/__init__.py +148 -25
  86. webscout/Provider/OPENAI/ai4chat.py +348 -0
  87. webscout/Provider/OPENAI/akashgpt.py +436 -0
  88. webscout/Provider/OPENAI/algion.py +303 -0
  89. webscout/Provider/OPENAI/ayle.py +365 -0
  90. webscout/Provider/OPENAI/base.py +253 -46
  91. webscout/Provider/OPENAI/cerebras.py +296 -0
  92. webscout/Provider/OPENAI/chatgpt.py +514 -193
  93. webscout/Provider/OPENAI/chatsandbox.py +233 -0
  94. webscout/Provider/OPENAI/deepinfra.py +403 -272
  95. webscout/Provider/OPENAI/e2b.py +2370 -1350
  96. webscout/Provider/OPENAI/elmo.py +278 -0
  97. webscout/Provider/OPENAI/exaai.py +186 -138
  98. webscout/Provider/OPENAI/freeassist.py +446 -0
  99. webscout/Provider/OPENAI/gradient.py +448 -0
  100. webscout/Provider/OPENAI/groq.py +380 -0
  101. webscout/Provider/OPENAI/hadadxyz.py +292 -0
  102. webscout/Provider/OPENAI/heckai.py +100 -104
  103. webscout/Provider/OPENAI/huggingface.py +321 -0
  104. webscout/Provider/OPENAI/ibm.py +425 -0
  105. webscout/Provider/OPENAI/llmchat.py +253 -0
  106. webscout/Provider/OPENAI/llmchatco.py +378 -327
  107. webscout/Provider/OPENAI/meta.py +541 -0
  108. webscout/Provider/OPENAI/netwrck.py +110 -84
  109. webscout/Provider/OPENAI/nvidia.py +317 -0
  110. webscout/Provider/OPENAI/oivscode.py +348 -0
  111. webscout/Provider/OPENAI/openrouter.py +328 -0
  112. webscout/Provider/OPENAI/pydantic_imports.py +1 -0
  113. webscout/Provider/OPENAI/sambanova.py +397 -0
  114. webscout/Provider/OPENAI/sonus.py +126 -115
  115. webscout/Provider/OPENAI/textpollinations.py +218 -133
  116. webscout/Provider/OPENAI/toolbaz.py +136 -166
  117. webscout/Provider/OPENAI/typefully.py +419 -0
  118. webscout/Provider/OPENAI/typliai.py +279 -0
  119. webscout/Provider/OPENAI/utils.py +314 -211
  120. webscout/Provider/OPENAI/wisecat.py +103 -125
  121. webscout/Provider/OPENAI/writecream.py +185 -156
  122. webscout/Provider/OPENAI/x0gpt.py +227 -136
  123. webscout/Provider/OPENAI/zenmux.py +380 -0
  124. webscout/Provider/OpenRouter.py +386 -0
  125. webscout/Provider/Openai.py +337 -496
  126. webscout/Provider/PI.py +443 -344
  127. webscout/Provider/QwenLM.py +346 -254
  128. webscout/Provider/STT/__init__.py +28 -0
  129. webscout/Provider/STT/base.py +303 -0
  130. webscout/Provider/STT/elevenlabs.py +264 -0
  131. webscout/Provider/Sambanova.py +317 -0
  132. webscout/Provider/TTI/README.md +69 -0
  133. webscout/Provider/TTI/__init__.py +37 -12
  134. webscout/Provider/TTI/base.py +147 -0
  135. webscout/Provider/TTI/claudeonline.py +393 -0
  136. webscout/Provider/TTI/magicstudio.py +292 -0
  137. webscout/Provider/TTI/miragic.py +180 -0
  138. webscout/Provider/TTI/pollinations.py +331 -0
  139. webscout/Provider/TTI/together.py +334 -0
  140. webscout/Provider/TTI/utils.py +14 -0
  141. webscout/Provider/TTS/README.md +186 -0
  142. webscout/Provider/TTS/__init__.py +43 -7
  143. webscout/Provider/TTS/base.py +523 -0
  144. webscout/Provider/TTS/deepgram.py +286 -156
  145. webscout/Provider/TTS/elevenlabs.py +189 -111
  146. webscout/Provider/TTS/freetts.py +218 -0
  147. webscout/Provider/TTS/murfai.py +288 -113
  148. webscout/Provider/TTS/openai_fm.py +364 -0
  149. webscout/Provider/TTS/parler.py +203 -111
  150. webscout/Provider/TTS/qwen.py +334 -0
  151. webscout/Provider/TTS/sherpa.py +286 -0
  152. webscout/Provider/TTS/speechma.py +693 -180
  153. webscout/Provider/TTS/streamElements.py +275 -333
  154. webscout/Provider/TTS/utils.py +280 -280
  155. webscout/Provider/TextPollinationsAI.py +221 -121
  156. webscout/Provider/TogetherAI.py +450 -0
  157. webscout/Provider/TwoAI.py +309 -199
  158. webscout/Provider/TypliAI.py +311 -0
  159. webscout/Provider/UNFINISHED/ChatHub.py +219 -0
  160. webscout/Provider/{OPENAI/glider.py → UNFINISHED/ChutesAI.py} +160 -145
  161. webscout/Provider/UNFINISHED/GizAI.py +300 -0
  162. webscout/Provider/UNFINISHED/Marcus.py +218 -0
  163. webscout/Provider/UNFINISHED/Qodo.py +481 -0
  164. webscout/Provider/UNFINISHED/XenAI.py +330 -0
  165. webscout/Provider/{Youchat.py → UNFINISHED/Youchat.py} +64 -47
  166. webscout/Provider/UNFINISHED/aihumanizer.py +41 -0
  167. webscout/Provider/UNFINISHED/grammerchecker.py +37 -0
  168. webscout/Provider/UNFINISHED/liner.py +342 -0
  169. webscout/Provider/UNFINISHED/liner_api_request.py +246 -0
  170. webscout/Provider/UNFINISHED/samurai.py +231 -0
  171. webscout/Provider/WiseCat.py +256 -196
  172. webscout/Provider/WrDoChat.py +390 -0
  173. webscout/Provider/__init__.py +115 -198
  174. webscout/Provider/ai4chat.py +181 -202
  175. webscout/Provider/akashgpt.py +330 -342
  176. webscout/Provider/cerebras.py +397 -242
  177. webscout/Provider/cleeai.py +236 -213
  178. webscout/Provider/elmo.py +291 -234
  179. webscout/Provider/geminiapi.py +343 -208
  180. webscout/Provider/julius.py +245 -223
  181. webscout/Provider/learnfastai.py +333 -266
  182. webscout/Provider/llama3mitril.py +230 -180
  183. webscout/Provider/llmchat.py +308 -213
  184. webscout/Provider/llmchatco.py +321 -311
  185. webscout/Provider/meta.py +996 -794
  186. webscout/Provider/oivscode.py +332 -0
  187. webscout/Provider/searchchat.py +316 -293
  188. webscout/Provider/sonus.py +264 -208
  189. webscout/Provider/toolbaz.py +359 -320
  190. webscout/Provider/turboseek.py +332 -219
  191. webscout/Provider/typefully.py +262 -280
  192. webscout/Provider/x0gpt.py +332 -256
  193. webscout/__init__.py +31 -38
  194. webscout/__main__.py +5 -5
  195. webscout/cli.py +585 -293
  196. webscout/client.py +1497 -0
  197. webscout/conversation.py +140 -565
  198. webscout/exceptions.py +383 -339
  199. webscout/litagent/__init__.py +29 -29
  200. webscout/litagent/agent.py +492 -455
  201. webscout/litagent/constants.py +60 -60
  202. webscout/models.py +505 -181
  203. webscout/optimizers.py +32 -378
  204. webscout/prompt_manager.py +376 -274
  205. webscout/sanitize.py +1514 -0
  206. webscout/scout/README.md +452 -0
  207. webscout/scout/__init__.py +8 -8
  208. webscout/scout/core/__init__.py +7 -7
  209. webscout/scout/core/crawler.py +330 -140
  210. webscout/scout/core/scout.py +800 -568
  211. webscout/scout/core/search_result.py +51 -96
  212. webscout/scout/core/text_analyzer.py +64 -63
  213. webscout/scout/core/text_utils.py +412 -277
  214. webscout/scout/core/web_analyzer.py +54 -52
  215. webscout/scout/element.py +872 -460
  216. webscout/scout/parsers/__init__.py +70 -69
  217. webscout/scout/parsers/html5lib_parser.py +182 -172
  218. webscout/scout/parsers/html_parser.py +238 -236
  219. webscout/scout/parsers/lxml_parser.py +203 -178
  220. webscout/scout/utils.py +38 -37
  221. webscout/search/__init__.py +47 -0
  222. webscout/search/base.py +201 -0
  223. webscout/search/bing_main.py +45 -0
  224. webscout/search/brave_main.py +92 -0
  225. webscout/search/duckduckgo_main.py +57 -0
  226. webscout/search/engines/__init__.py +127 -0
  227. webscout/search/engines/bing/__init__.py +15 -0
  228. webscout/search/engines/bing/base.py +35 -0
  229. webscout/search/engines/bing/images.py +114 -0
  230. webscout/search/engines/bing/news.py +96 -0
  231. webscout/search/engines/bing/suggestions.py +36 -0
  232. webscout/search/engines/bing/text.py +109 -0
  233. webscout/search/engines/brave/__init__.py +19 -0
  234. webscout/search/engines/brave/base.py +47 -0
  235. webscout/search/engines/brave/images.py +213 -0
  236. webscout/search/engines/brave/news.py +353 -0
  237. webscout/search/engines/brave/suggestions.py +318 -0
  238. webscout/search/engines/brave/text.py +167 -0
  239. webscout/search/engines/brave/videos.py +364 -0
  240. webscout/search/engines/duckduckgo/__init__.py +25 -0
  241. webscout/search/engines/duckduckgo/answers.py +80 -0
  242. webscout/search/engines/duckduckgo/base.py +189 -0
  243. webscout/search/engines/duckduckgo/images.py +100 -0
  244. webscout/search/engines/duckduckgo/maps.py +183 -0
  245. webscout/search/engines/duckduckgo/news.py +70 -0
  246. webscout/search/engines/duckduckgo/suggestions.py +22 -0
  247. webscout/search/engines/duckduckgo/text.py +221 -0
  248. webscout/search/engines/duckduckgo/translate.py +48 -0
  249. webscout/search/engines/duckduckgo/videos.py +80 -0
  250. webscout/search/engines/duckduckgo/weather.py +84 -0
  251. webscout/search/engines/mojeek.py +61 -0
  252. webscout/search/engines/wikipedia.py +77 -0
  253. webscout/search/engines/yahoo/__init__.py +41 -0
  254. webscout/search/engines/yahoo/answers.py +19 -0
  255. webscout/search/engines/yahoo/base.py +34 -0
  256. webscout/search/engines/yahoo/images.py +323 -0
  257. webscout/search/engines/yahoo/maps.py +19 -0
  258. webscout/search/engines/yahoo/news.py +258 -0
  259. webscout/search/engines/yahoo/suggestions.py +140 -0
  260. webscout/search/engines/yahoo/text.py +273 -0
  261. webscout/search/engines/yahoo/translate.py +19 -0
  262. webscout/search/engines/yahoo/videos.py +302 -0
  263. webscout/search/engines/yahoo/weather.py +220 -0
  264. webscout/search/engines/yandex.py +67 -0
  265. webscout/search/engines/yep/__init__.py +13 -0
  266. webscout/search/engines/yep/base.py +34 -0
  267. webscout/search/engines/yep/images.py +101 -0
  268. webscout/search/engines/yep/suggestions.py +38 -0
  269. webscout/search/engines/yep/text.py +99 -0
  270. webscout/search/http_client.py +172 -0
  271. webscout/search/results.py +141 -0
  272. webscout/search/yahoo_main.py +57 -0
  273. webscout/search/yep_main.py +48 -0
  274. webscout/server/__init__.py +48 -0
  275. webscout/server/config.py +78 -0
  276. webscout/server/exceptions.py +69 -0
  277. webscout/server/providers.py +286 -0
  278. webscout/server/request_models.py +131 -0
  279. webscout/server/request_processing.py +404 -0
  280. webscout/server/routes.py +642 -0
  281. webscout/server/server.py +351 -0
  282. webscout/server/ui_templates.py +1171 -0
  283. webscout/swiftcli/__init__.py +79 -809
  284. webscout/swiftcli/core/__init__.py +7 -0
  285. webscout/swiftcli/core/cli.py +574 -0
  286. webscout/swiftcli/core/context.py +98 -0
  287. webscout/swiftcli/core/group.py +268 -0
  288. webscout/swiftcli/decorators/__init__.py +28 -0
  289. webscout/swiftcli/decorators/command.py +243 -0
  290. webscout/swiftcli/decorators/options.py +247 -0
  291. webscout/swiftcli/decorators/output.py +392 -0
  292. webscout/swiftcli/exceptions.py +21 -0
  293. webscout/swiftcli/plugins/__init__.py +9 -0
  294. webscout/swiftcli/plugins/base.py +134 -0
  295. webscout/swiftcli/plugins/manager.py +269 -0
  296. webscout/swiftcli/utils/__init__.py +58 -0
  297. webscout/swiftcli/utils/formatting.py +251 -0
  298. webscout/swiftcli/utils/parsing.py +368 -0
  299. webscout/update_checker.py +280 -136
  300. webscout/utils.py +28 -14
  301. webscout/version.py +2 -1
  302. webscout/version.py.bak +3 -0
  303. webscout/zeroart/__init__.py +218 -55
  304. webscout/zeroart/base.py +70 -60
  305. webscout/zeroart/effects.py +155 -99
  306. webscout/zeroart/fonts.py +1799 -816
  307. webscout-2026.1.19.dist-info/METADATA +638 -0
  308. webscout-2026.1.19.dist-info/RECORD +312 -0
  309. {webscout-8.2.2.dist-info → webscout-2026.1.19.dist-info}/WHEEL +1 -1
  310. webscout-2026.1.19.dist-info/entry_points.txt +4 -0
  311. webscout-2026.1.19.dist-info/top_level.txt +1 -0
  312. inferno/__init__.py +0 -6
  313. inferno/__main__.py +0 -9
  314. inferno/cli.py +0 -6
  315. webscout/DWEBS.py +0 -477
  316. webscout/Extra/autocoder/__init__.py +0 -9
  317. webscout/Extra/autocoder/autocoder.py +0 -849
  318. webscout/Extra/autocoder/autocoder_utiles.py +0 -332
  319. webscout/LLM.py +0 -442
  320. webscout/Litlogger/__init__.py +0 -67
  321. webscout/Litlogger/core/__init__.py +0 -6
  322. webscout/Litlogger/core/level.py +0 -23
  323. webscout/Litlogger/core/logger.py +0 -165
  324. webscout/Litlogger/handlers/__init__.py +0 -12
  325. webscout/Litlogger/handlers/console.py +0 -33
  326. webscout/Litlogger/handlers/file.py +0 -143
  327. webscout/Litlogger/handlers/network.py +0 -173
  328. webscout/Litlogger/styles/__init__.py +0 -7
  329. webscout/Litlogger/styles/colors.py +0 -249
  330. webscout/Litlogger/styles/formats.py +0 -458
  331. webscout/Litlogger/styles/text.py +0 -87
  332. webscout/Litlogger/utils/__init__.py +0 -6
  333. webscout/Litlogger/utils/detectors.py +0 -153
  334. webscout/Litlogger/utils/formatters.py +0 -200
  335. webscout/Local/__init__.py +0 -12
  336. webscout/Local/__main__.py +0 -9
  337. webscout/Local/api.py +0 -576
  338. webscout/Local/cli.py +0 -516
  339. webscout/Local/config.py +0 -75
  340. webscout/Local/llm.py +0 -287
  341. webscout/Local/model_manager.py +0 -253
  342. webscout/Local/server.py +0 -721
  343. webscout/Local/utils.py +0 -93
  344. webscout/Provider/AI21.py +0 -177
  345. webscout/Provider/AISEARCH/DeepFind.py +0 -250
  346. webscout/Provider/AISEARCH/ISou.py +0 -256
  347. webscout/Provider/AISEARCH/felo_search.py +0 -228
  348. webscout/Provider/AISEARCH/genspark_search.py +0 -208
  349. webscout/Provider/AISEARCH/hika_search.py +0 -194
  350. webscout/Provider/AISEARCH/scira_search.py +0 -324
  351. webscout/Provider/Aitopia.py +0 -292
  352. webscout/Provider/AllenAI.py +0 -413
  353. webscout/Provider/Blackboxai.py +0 -229
  354. webscout/Provider/C4ai.py +0 -432
  355. webscout/Provider/ChatGPTClone.py +0 -226
  356. webscout/Provider/ChatGPTES.py +0 -237
  357. webscout/Provider/ChatGPTGratis.py +0 -194
  358. webscout/Provider/Chatify.py +0 -175
  359. webscout/Provider/Cloudflare.py +0 -273
  360. webscout/Provider/DeepSeek.py +0 -196
  361. webscout/Provider/ElectronHub.py +0 -709
  362. webscout/Provider/ExaChat.py +0 -342
  363. webscout/Provider/Free2GPT.py +0 -241
  364. webscout/Provider/GPTWeb.py +0 -193
  365. webscout/Provider/Glider.py +0 -211
  366. webscout/Provider/HF_space/__init__.py +0 -0
  367. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  368. webscout/Provider/HuggingFaceChat.py +0 -462
  369. webscout/Provider/Hunyuan.py +0 -272
  370. webscout/Provider/LambdaChat.py +0 -392
  371. webscout/Provider/Llama.py +0 -200
  372. webscout/Provider/Llama3.py +0 -204
  373. webscout/Provider/Marcus.py +0 -148
  374. webscout/Provider/OLLAMA.py +0 -396
  375. webscout/Provider/OPENAI/c4ai.py +0 -367
  376. webscout/Provider/OPENAI/chatgptclone.py +0 -460
  377. webscout/Provider/OPENAI/exachat.py +0 -433
  378. webscout/Provider/OPENAI/freeaichat.py +0 -352
  379. webscout/Provider/OPENAI/opkfc.py +0 -488
  380. webscout/Provider/OPENAI/scirachat.py +0 -463
  381. webscout/Provider/OPENAI/standardinput.py +0 -425
  382. webscout/Provider/OPENAI/typegpt.py +0 -346
  383. webscout/Provider/OPENAI/uncovrAI.py +0 -455
  384. webscout/Provider/OPENAI/venice.py +0 -413
  385. webscout/Provider/OPENAI/yep.py +0 -327
  386. webscout/Provider/OpenGPT.py +0 -199
  387. webscout/Provider/Perplexitylabs.py +0 -415
  388. webscout/Provider/Phind.py +0 -535
  389. webscout/Provider/PizzaGPT.py +0 -198
  390. webscout/Provider/Reka.py +0 -214
  391. webscout/Provider/StandardInput.py +0 -278
  392. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  393. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  394. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  395. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  396. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  397. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  398. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  399. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  400. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  401. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  402. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  403. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  404. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  405. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  406. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  407. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  408. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  409. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  410. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  411. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  412. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  413. webscout/Provider/TTI/artbit/__init__.py +0 -22
  414. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  415. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  416. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  417. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  418. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  419. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  420. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  421. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  422. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  423. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  424. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  425. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  426. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  427. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  428. webscout/Provider/TTI/talkai/__init__.py +0 -4
  429. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  430. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  431. webscout/Provider/TTS/gesserit.py +0 -127
  432. webscout/Provider/TeachAnything.py +0 -187
  433. webscout/Provider/Venice.py +0 -219
  434. webscout/Provider/VercelAI.py +0 -234
  435. webscout/Provider/WebSim.py +0 -228
  436. webscout/Provider/Writecream.py +0 -211
  437. webscout/Provider/WritingMate.py +0 -197
  438. webscout/Provider/aimathgpt.py +0 -189
  439. webscout/Provider/askmyai.py +0 -158
  440. webscout/Provider/asksteve.py +0 -203
  441. webscout/Provider/bagoodex.py +0 -145
  442. webscout/Provider/chatglm.py +0 -205
  443. webscout/Provider/copilot.py +0 -428
  444. webscout/Provider/freeaichat.py +0 -271
  445. webscout/Provider/gaurish.py +0 -244
  446. webscout/Provider/geminiprorealtime.py +0 -160
  447. webscout/Provider/granite.py +0 -187
  448. webscout/Provider/hermes.py +0 -219
  449. webscout/Provider/koala.py +0 -268
  450. webscout/Provider/labyrinth.py +0 -340
  451. webscout/Provider/lepton.py +0 -194
  452. webscout/Provider/llamatutor.py +0 -192
  453. webscout/Provider/multichat.py +0 -325
  454. webscout/Provider/promptrefine.py +0 -193
  455. webscout/Provider/scira_chat.py +0 -277
  456. webscout/Provider/scnet.py +0 -187
  457. webscout/Provider/talkai.py +0 -194
  458. webscout/Provider/tutorai.py +0 -252
  459. webscout/Provider/typegpt.py +0 -232
  460. webscout/Provider/uncovr.py +0 -312
  461. webscout/Provider/yep.py +0 -376
  462. webscout/litprinter/__init__.py +0 -59
  463. webscout/scout/core.py +0 -881
  464. webscout/tempid.py +0 -128
  465. webscout/webscout_search.py +0 -1346
  466. webscout/webscout_search_async.py +0 -877
  467. webscout/yep_search.py +0 -297
  468. webscout-8.2.2.dist-info/METADATA +0 -734
  469. webscout-8.2.2.dist-info/RECORD +0 -309
  470. webscout-8.2.2.dist-info/entry_points.txt +0 -5
  471. webscout-8.2.2.dist-info/top_level.txt +0 -3
  472. webstoken/__init__.py +0 -30
  473. webstoken/classifier.py +0 -189
  474. webstoken/keywords.py +0 -216
  475. webstoken/language.py +0 -128
  476. webstoken/ner.py +0 -164
  477. webstoken/normalizer.py +0 -35
  478. webstoken/processor.py +0 -77
  479. webstoken/sentiment.py +0 -206
  480. webstoken/stemmer.py +0 -73
  481. webstoken/tagger.py +0 -60
  482. webstoken/tokenizer.py +0 -158
  483. {webscout-8.2.2.dist-info → webscout-2026.1.19.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/OPENAI/llmchatco.py
@@ -1,327 +1,378 @@
-import time
-import uuid
-import requests
-import json
-from typing import List, Dict, Optional, Union, Generator, Any
-
-# Import base classes and utility structures
-from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
-from .utils import (
-    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-    ChatCompletionMessage, CompletionUsage, get_last_user_message, get_system_prompt, format_prompt # Import format_prompt
-)
-
-# Attempt to import LitAgent, fallback if not available
-try:
-    from webscout.litagent import LitAgent
-except ImportError:
-    # Define a dummy LitAgent if webscout is not installed or accessible
-    class LitAgent:
-        def random(self) -> str:
-            return "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
-
-# --- LLMChatCo Client ---
-
-class Completions(BaseCompletions):
-    def __init__(self, client: 'LLMChatCo'):
-        self._client = client
-
-    def create(
-        self,
-        *,
-        model: str, # Model is now mandatory per request
-        messages: List[Dict[str, str]],
-        max_tokens: Optional[int] = 2048, # Note: LLMChatCo doesn't seem to use max_tokens directly in payload
-        stream: bool = False,
-        temperature: Optional[float] = None, # Note: LLMChatCo doesn't seem to use temperature directly in payload
-        top_p: Optional[float] = None, # Note: LLMChatCo doesn't seem to use top_p directly in payload
-        web_search: bool = False, # LLMChatCo specific parameter
-        system_prompt: Optional[str] = "You are a helpful assistant.", # Default system prompt if not provided
-        **kwargs: Any
-    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-        """
-        Creates a model response for the given chat conversation.
-        Mimics openai.chat.completions.create
-        """
-        if model not in self._client.AVAILABLE_MODELS:
-            # Raise error as model is mandatory and must be valid for this provider
-            raise ValueError(f"Model '{model}' not supported by LLMChatCo. Available: {self._client.AVAILABLE_MODELS}")
-        actual_model = model
-
-        # Determine the effective system prompt
-        effective_system_prompt = system_prompt # Use the provided system_prompt or its default
-        message_list_system_prompt = get_system_prompt(messages)
-        # If a system prompt is also in messages, the explicit one takes precedence.
-        # We'll use the effective_system_prompt determined above.
-
-        # Prepare final messages list, ensuring only one system message at the start
-        final_messages = []
-        if effective_system_prompt:
-            final_messages.append({"role": "system", "content": effective_system_prompt})
-        final_messages.extend([msg for msg in messages if msg.get("role") != "system"])
-
-        # Extract the last user prompt using the utility function for the separate 'prompt' field
-        last_user_prompt = get_last_user_message(final_messages)
-
-        # Note: format_prompt is not directly used here as the API requires the structured 'messages' list
-        # and a separate 'prompt' field, rather than a single formatted string.
-
-        # Generate a unique ID for this message
-        thread_item_id = ''.join(str(uuid.uuid4()).split('-'))[:20]
-
-        payload = {
-            "mode": actual_model,
-            "prompt": last_user_prompt, # LLMChatCo seems to require the last prompt separately
-            "threadId": self._client.thread_id,
-            "messages": final_messages, # Use the reconstructed final_messages list
-            "mcpConfig": {}, # Keep structure as observed
-            "threadItemId": thread_item_id,
-            "parentThreadItemId": "", # Assuming no parent for simplicity
-            "webSearch": web_search,
-            "showSuggestions": True # Keep structure as observed
-        }
-
-        # Add any extra kwargs to the payload if needed, though LLMChatCo seems limited
-        payload.update(kwargs)
-
-        request_id = f"chatcmpl-{uuid.uuid4()}"
-        created_time = int(time.time())
-
-        if stream:
-            return self._create_stream(request_id, created_time, actual_model, payload)
-        else:
-            return self._create_non_stream(request_id, created_time, actual_model, payload)
-
-    def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-    ) -> Generator[ChatCompletionChunk, None, None]:
-        try:
-            response = self._client.session.post(
-                self._client.api_endpoint,
-                headers=self._client.headers,
-                json=payload,
-                stream=True,
-                timeout=self._client.timeout
-            )
-
-            if not response.ok:
-                raise IOError(
-                    f"LLMChatCo API Error: {response.status_code} {response.reason} - {response.text}"
-                )
-
-            full_response_text = ""
-            current_event = None
-            buffer = ""
-
-            for chunk_bytes in response.iter_content(chunk_size=None, decode_unicode=False):
-                if not chunk_bytes:
-                    continue
-
-                buffer += chunk_bytes.decode('utf-8', errors='replace')
-
-                while '\n' in buffer:
-                    line, buffer = buffer.split('\n', 1)
-                    line = line.strip()
-
-                    if not line: # End of an event block
-                        current_event = None
-                        continue
-
-                    if line.startswith('event:'):
-                        current_event = line[len('event:'):].strip()
-                    elif line.startswith('data:'):
-                        data_content = line[len('data:'):].strip()
-                        if data_content and current_event == 'answer':
-                            try:
-                                json_data = json.loads(data_content)
-                                answer_data = json_data.get("answer", {})
-                                text_chunk = answer_data.get("text", "")
-                                full_text = answer_data.get("fullText")
-                                status = answer_data.get("status")
-
-                                # Prefer fullText if available and status is COMPLETED
-                                if full_text is not None and status == "COMPLETED":
-                                    delta_content = full_text[len(full_response_text):]
-                                    full_response_text = full_text # Update full response tracker
-                                elif text_chunk is not None:
-                                    # Calculate delta based on potentially partial 'text' field
-                                    delta_content = text_chunk[len(full_response_text):]
-                                    full_response_text = text_chunk # Update full response tracker
-                                else:
-                                    delta_content = None
-
-                                if delta_content:
-                                    delta = ChoiceDelta(content=delta_content, role="assistant")
-                                    choice = Choice(index=0, delta=delta, finish_reason=None)
-                                    chunk = ChatCompletionChunk(
-                                        id=request_id,
-                                        choices=[choice],
-                                        created=created_time,
-                                        model=model,
-                                    )
-                                    yield chunk
-
-                            except json.JSONDecodeError:
-                                print(f"Warning: Could not decode JSON data line: {data_content}")
-                                continue
-                        elif data_content and current_event == 'done':
-                            # The 'done' event signals the end of the stream
-                            delta = ChoiceDelta() # Empty delta
-                            choice = Choice(index=0, delta=delta, finish_reason="stop")
-                            chunk = ChatCompletionChunk(
-                                id=request_id,
-                                choices=[choice],
-                                created=created_time,
-                                model=model,
-                            )
-                            yield chunk
-                            return # End the generator
-
-        except requests.exceptions.RequestException as e:
-            print(f"Error during LLMChatCo stream request: {e}")
-            raise IOError(f"LLMChatCo request failed: {e}") from e
-        except Exception as e:
-            print(f"Unexpected error during LLMChatCo stream: {e}")
-            raise IOError(f"LLMChatCo stream processing failed: {e}") from e
-
-        # Fallback final chunk if 'done' event wasn't received properly
-        delta = ChoiceDelta()
-        choice = Choice(index=0, delta=delta, finish_reason="stop")
-        chunk = ChatCompletionChunk(
-            id=request_id,
-            choices=[choice],
-            created=created_time,
-            model=model,
-        )
-        yield chunk
-
-
-    def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-    ) -> ChatCompletion:
-        # Non-streaming requires accumulating stream chunks
-        full_response_content = ""
-        finish_reason = "stop" # Assume stop unless error occurs
-
-        try:
-            stream_generator = self._create_stream(request_id, created_time, model, payload)
-            for chunk in stream_generator:
-                if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
-                    full_response_content += chunk.choices[0].delta.content
-                if chunk.choices and chunk.choices[0].finish_reason:
-                    finish_reason = chunk.choices[0].finish_reason
-
-        except IOError as e:
-            print(f"Error obtaining non-stream response from LLMChatCo: {e}")
-            # Return a partial or error response if needed, or re-raise
-            # For simplicity, we'll return what we have, potentially empty
-            finish_reason = "error" # Indicate an issue
-
-        # Construct the final ChatCompletion object
-        message = ChatCompletionMessage(
-            role="assistant",
-            content=full_response_content
-        )
-        choice = Choice(
-            index=0,
-            message=message,
-            finish_reason=finish_reason
-        )
-        # Usage data is not provided by this API, so set to 0
-        usage = CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0)
-
-        completion = ChatCompletion(
-            id=request_id,
-            choices=[choice],
-            created=created_time,
-            model=model,
-            usage=usage,
-        )
-        return completion
-
-class Chat(BaseChat):
-    def __init__(self, client: 'LLMChatCo'):
-        self.completions = Completions(client)
-
-class LLMChatCo(OpenAICompatibleProvider):
-    """
-    OpenAI-compatible client for LLMChat.co API.
-
-    Usage:
-        client = LLMChatCo()
-        response = client.chat.completions.create(
-            model="gemini-flash-2.0", # Model must be specified here
-            messages=[{"role": "user", "content": "Hello!"}]
-        )
-        print(response.choices[0].message.content)
-    """
-    AVAILABLE_MODELS = [
-        "gemini-flash-2.0", # Default model
-        "llama-4-scout",
-        "gpt-4o-mini",
-        # "gpt-4.1",
-        # "gpt-4.1-mini",
-        "gpt-4.1-nano",
-    ]
-
-    def __init__(
-        self,
-        timeout: int = 60,
-        browser: str = "chrome" # For User-Agent generation
-    ):
-        """
-        Initialize the LLMChatCo client.
-
-        Args:
-            timeout: Request timeout in seconds.
-            browser: Browser name for LitAgent to generate User-Agent.
-        """
-        # Removed model, system_prompt, proxies parameters
-
-        self.timeout = timeout
-        # Removed self.system_prompt assignment
-        self.api_endpoint = "https://llmchat.co/api/completion"
-        self.session = requests.Session()
-        self.thread_id = str(uuid.uuid4()) # Unique thread ID per client instance
-
-        # Removed proxy handling block
-
-        # Initialize LitAgent for user agent generation and fingerprinting
-        try:
-            agent = LitAgent()
-            fingerprint = agent.generate_fingerprint(browser=browser)
-        except Exception as e:
-            print(f"Warning: Failed to generate fingerprint with LitAgent: {e}. Using fallback.")
-            # Fallback fingerprint data
-            fingerprint = {
-                "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
-                "accept_language": "en-US,en;q=0.9",
-                "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
-                "platform": "Windows",
-                "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
-            }
-
-        # Initialize headers using the fingerprint
-        self.headers = {
-            "Accept": fingerprint["accept"],
-            "Accept-Encoding": "gzip, deflate, br, zstd", # Standard encoding
-            "Accept-Language": fingerprint["accept_language"],
-            "Content-Type": "application/json",
-            "Cache-Control": "no-cache",
-            "Connection": "keep-alive",
-            "Origin": "https://llmchat.co", # Specific origin for LLMChatCo
-            "Pragma": "no-cache",
-            "Referer": f"https://llmchat.co/chat/{self.thread_id}", # Specific referer for LLMChatCo
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
-            "Sec-CH-UA": fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"', # Fallback if empty
-            "Sec-CH-UA-Mobile": "?0",
-            "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
-            "User-Agent": fingerprint["user_agent"],
-            "DNT": "1", # Added back from previous version
-        }
-        self.session.headers.update(self.headers)
-
-        # Initialize the chat interface
-        self.chat = Chat(self)
+import json
+import time
+import uuid
+from typing import Any, Dict, Generator, List, Optional, Union, cast
+
+import requests
+
+# Import base classes and utility structures
+from webscout.Provider.OPENAI.base import (
+    BaseChat,
+    BaseCompletions,
+    OpenAICompatibleProvider,
+    SimpleModelList,
+)
+from webscout.Provider.OPENAI.utils import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    ChatCompletionMessage,  # Import format_prompt
+    Choice,
+    ChoiceDelta,
+    CompletionUsage,
+    get_last_user_message,
+    get_system_prompt,
+)
+
+# Attempt to import LitAgent, fallback if not available
+from ...litagent import LitAgent
+
+# --- LLMChatCo Client ---
+
+
+class Completions(BaseCompletions):
+    def __init__(self, client: "LLMChatCo"):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,  # Model is now mandatory per request
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[
+            int
+        ] = 2048,  # Note: LLMChatCo doesn't seem to use max_tokens directly in payload
+        stream: bool = False,
+        temperature: Optional[
+            float
+        ] = None,  # Note: LLMChatCo doesn't seem to use temperature directly in payload
+        top_p: Optional[
+            float
+        ] = None,  # Note: LLMChatCo doesn't seem to use top_p directly in payload
+        web_search: bool = False,  # LLMChatCo specific parameter
+        system_prompt: Optional[
+            str
+        ] = "You are a helpful assistant.",  # Default system prompt if not provided
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
+        **kwargs: Any,
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation.
+        Mimics openai.chat.completions.create
+        """
+        if model not in self._client.AVAILABLE_MODELS:
+            # Raise error as model is mandatory and must be valid for this provider
+            raise ValueError(
+                f"Model '{model}' not supported by LLMChatCo. Available: {self._client.AVAILABLE_MODELS}"
+            )
+        actual_model = model
+
+        # Determine the effective system prompt
+        effective_system_prompt = system_prompt  # Use the provided system_prompt or its default
+        get_system_prompt(messages)
+        # If a system prompt is also in messages, the explicit one takes precedence.
+        # We'll use the effective_system_prompt determined above.
+
+        # Prepare final messages list, ensuring only one system message at the start
+        final_messages = []
+        if effective_system_prompt:
+            final_messages.append({"role": "system", "content": effective_system_prompt})
+        final_messages.extend([msg for msg in messages if msg.get("role") != "system"])
+
+        # Extract the last user prompt using the utility function for the separate 'prompt' field
+        last_user_prompt = get_last_user_message(final_messages)
+
+        # Note: format_prompt is not directly used here as the API requires the structured 'messages' list
+        # and a separate 'prompt' field, rather than a single formatted string.
+
+        # Generate a unique ID for this message
+        thread_item_id = "".join(str(uuid.uuid4()).split("-"))[:20]
+
+        payload = {
+            "mode": actual_model,
+            "prompt": last_user_prompt,  # LLMChatCo seems to require the last prompt separately
+            "threadId": self._client.thread_id,
+            "messages": final_messages,  # Use the reconstructed final_messages list
+            "mcpConfig": {},  # Keep structure as observed
+            "threadItemId": thread_item_id,
+            "parentThreadItemId": "",  # Assuming no parent for simplicity
+            "webSearch": web_search,
+            "showSuggestions": True,  # Keep structure as observed
+        }
+
+        # Add any extra kwargs to the payload if needed, though LLMChatCo seems limited
+        payload.update(kwargs)
+
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        if stream:
+            return self._create_stream(
+                request_id, created_time, actual_model, payload, timeout, proxies
+            )
+        else:
+            return self._create_non_stream(
+                request_id, created_time, actual_model, payload, timeout, proxies
+            )
+
+    def _create_stream(
+        self,
+        request_id: str,
+        created_time: int,
+        model: str,
+        payload: Dict[str, Any],
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        try:
+            response = self._client.session.post(
+                self._client.api_endpoint,
+                headers=self._client.headers,
+                json=payload,
+                stream=True,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None),
+            )
+
+            if not response.ok:
+                raise IOError(
+                    f"LLMChatCo API Error: {response.status_code} {response.reason} - {response.text}"
+                )
+
+            full_response_text = ""
+            current_event = None
+            buffer = ""
+
+            for chunk_bytes in response.iter_content(chunk_size=None, decode_unicode=False):
+                if not chunk_bytes:
+                    continue
+
+                buffer += chunk_bytes.decode("utf-8", errors="replace")
+
+                while "\n" in buffer:
+                    line, buffer = buffer.split("\n", 1)
+                    line = line.strip()
+
+                    if not line:  # End of an event block
+                        current_event = None
+                        continue
+
+                    if line.startswith("event:"):
+                        current_event = line[len("event:") :].strip()
+                    elif line.startswith("data:"):
+                        data_content = line[len("data:") :].strip()
+                        if data_content and current_event == "answer":
+                            try:
+                                json_data = json.loads(data_content)
+                                answer_data = json_data.get("answer", {})
+                                text_chunk = answer_data.get("text", "")
+                                full_text = answer_data.get("fullText")
+                                status = answer_data.get("status")
+
+                                # Prefer fullText if available and status is COMPLETED
+                                if full_text is not None and status == "COMPLETED":
+                                    delta_content = full_text[len(full_response_text) :]
+                                    full_response_text = full_text  # Update full response tracker
+                                elif text_chunk is not None:
+                                    # Calculate delta based on potentially partial 'text' field
+                                    delta_content = text_chunk[len(full_response_text) :]
+                                    full_response_text = text_chunk  # Update full response tracker
+                                else:
+                                    delta_content = None
+
+                                if delta_content:
+                                    delta = ChoiceDelta(content=delta_content, role="assistant")
+                                    choice = Choice(index=0, delta=delta, finish_reason=None)
+                                    chunk = ChatCompletionChunk(
+                                        id=request_id,
+                                        choices=[choice],
+                                        created=created_time,
+                                        model=model,
+                                    )
+                                    yield chunk
+
+                            except json.JSONDecodeError:
+                                print(f"Warning: Could not decode JSON data line: {data_content}")
+                                continue
+                        elif data_content and current_event == "done":
+                            # The 'done' event signals the end of the stream
+                            delta = ChoiceDelta()  # Empty delta
+                            choice = Choice(index=0, delta=delta, finish_reason="stop")
+                            chunk = ChatCompletionChunk(
+                                id=request_id,
+                                choices=[choice],
+                                created=created_time,
+                                model=model,
+                            )
+                            yield chunk
+                            return  # End the generator
+
+        except requests.exceptions.RequestException as e:
+            print(f"Error during LLMChatCo stream request: {e}")
+            raise IOError(f"LLMChatCo request failed: {e}") from e
+        except Exception as e:
+            print(f"Unexpected error during LLMChatCo stream: {e}")
+            raise IOError(f"LLMChatCo stream processing failed: {e}") from e
+
+        # Fallback final chunk if 'done' event wasn't received properly
+        delta = ChoiceDelta()
+        choice = Choice(index=0, delta=delta, finish_reason="stop")
+        chunk = ChatCompletionChunk(
+            id=request_id,
+            choices=[choice],
+            created=created_time,
+            model=model,
+        )
+        yield chunk
+
+    def _create_non_stream(
+        self,
+        request_id: str,
+        created_time: int,
+        model: str,
+        payload: Dict[str, Any],
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
+    ) -> ChatCompletion:
+        # Non-streaming requires accumulating stream chunks
+        full_response_content = ""
+        finish_reason = "stop"  # Assume stop unless error occurs
+
+        try:
+            stream_generator = self._create_stream(
+                request_id, created_time, model, payload, timeout, proxies
+            )
+            for chunk in stream_generator:
+                if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
+                    full_response_content += chunk.choices[0].delta.content
+                if chunk.choices and chunk.choices[0].finish_reason:
+                    finish_reason = chunk.choices[0].finish_reason
+
+        except IOError as e:
+            print(f"Error obtaining non-stream response from LLMChatCo: {e}")
+            # Return a partial or error response if needed, or re-raise
+            # For simplicity, we'll return what we have, potentially empty
+            finish_reason = "error"  # Indicate an issue
+
+        # Construct the final ChatCompletion object
+        message = ChatCompletionMessage(role="assistant", content=full_response_content)
+        choice = Choice(index=0, message=message, finish_reason=finish_reason)
+        # Usage data is not provided by this API, so set to 0
+        usage = CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0)
+
+        completion = ChatCompletion(
+            id=request_id,
+            choices=[choice],
+            created=created_time,
+            model=model,
+            usage=usage,
+        )
+        return completion
+
+
+class Chat(BaseChat):
+    def __init__(self, client: "LLMChatCo"):
+        self.completions = Completions(client)
+
+
+class LLMChatCo(OpenAICompatibleProvider):
+    """
+    OpenAI-compatible client for LLMChat.co API.
+
+    Usage:
+        client = LLMChatCo()
+        response = client.chat.completions.create(
+            model="gemini-flash-2.0",  # Model must be specified here
+            messages=[{"role": "user", "content": "Hello!"}]
+        )
+        print(response.choices[0].message.content)
+    """
+
+    required_auth = False  # No API key required for LLMChatCo
+    AVAILABLE_MODELS = [
+        "gemini-flash-2.0",  # Default model
+        "llama-4-scout",
+        "gpt-4o-mini",
+        # "gpt-4.1",
+        # "gpt-4.1-mini",
+        "gpt-4.1-nano",
+    ]
+
+    def __init__(
+        self,
+        timeout: int = 60,
+        browser: str = "chrome",  # For User-Agent generation
+    ):
+        """
+        Initialize the LLMChatCo client.
+
+        Args:
+            timeout: Request timeout in seconds.
+            browser: Browser name for LitAgent to generate User-Agent.
+        """
+        # Removed model, system_prompt, proxies parameters
+
+        self.timeout = timeout
+        # Removed self.system_prompt assignment
+        self.api_endpoint = "https://llmchat.co/api/completion"
+        self.session = requests.Session()
+        self.thread_id = str(uuid.uuid4())  # Unique thread ID per client instance
+
+        # Removed proxy handling block
+
+        # Initialize LitAgent for user agent generation and fingerprinting
+        try:
+            agent = LitAgent()
+            fingerprint = agent.generate_fingerprint(browser=browser)
+        except Exception as e:
+            print(f"Warning: Failed to generate fingerprint with LitAgent: {e}. Using fallback.")
+            # Fallback fingerprint data
+            fingerprint = {
+                "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
+                "accept_language": "en-US,en;q=0.9",
+                "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
+                "platform": "Windows",
+                "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
+            }
+
+        # Initialize headers using the fingerprint
+        self.headers = {
+            "Accept": fingerprint["accept"],
+            "Accept-Language": fingerprint["accept_language"],
+            "Content-Type": "application/json",
+            "Cache-Control": "no-cache",
+            "Connection": "keep-alive",
+            "Origin": "https://llmchat.co",  # Specific origin for LLMChatCo
+            "Pragma": "no-cache",
+            "Referer": f"https://llmchat.co/chat/{self.thread_id}",  # Specific referer for LLMChatCo
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "Sec-CH-UA": fingerprint["sec_ch_ua"]
+            or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',  # Fallback if empty
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
+            "User-Agent": fingerprint["user_agent"],
+            "DNT": "1",  # Added back from previous version
+        }
+        self.session.headers.update(self.headers)
+
+        # Initialize the chat interface
+        self.chat = Chat(self)
+
+    @property
+    def models(self) -> SimpleModelList:
+        return SimpleModelList(type(self).AVAILABLE_MODELS)
+
+
+if __name__ == "__main__":
+    # Example usage
+    client = LLMChatCo()
+    response = client.chat.completions.create(
+        model="gemini-flash-2.0",
+        messages=[{"role": "user", "content": "Hello, how are you?"}],
+        stream=False,
+    )
+    if isinstance(response, ChatCompletion):
+        if response.choices[0].message and response.choices[0].message.content:
+            print(response.choices[0].message.content)
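
A minimal streaming sketch against the rewritten module, not taken from the diff itself; the import path webscout.Provider.OPENAI.llmchatco is assumed from file list item 106. Per the hunk above, create(..., stream=True) returns a generator of ChatCompletionChunk objects whose delta.content fields carry the incremental text that _create_stream parses from the provider's SSE-style "event: answer" / "data: {...}" frames, with an "event: done" frame closing the stream:

    # Hypothetical usage sketch; module path assumed from file list item 106.
    from webscout.Provider.OPENAI.llmchatco import LLMChatCo

    client = LLMChatCo(timeout=60)
    stream = client.chat.completions.create(
        model="gemini-flash-2.0",
        messages=[{"role": "user", "content": "Hello, how are you?"}],
        stream=True,  # yields ChatCompletionChunk objects until the 'done' event
    )
    for chunk in stream:
        # Each chunk mirrors the OpenAI streaming shape: choices[0].delta.content
        if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="", flush=True)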