webscout 8.2.9__py3-none-any.whl → 2026.1.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (413)
  1. webscout/AIauto.py +524 -251
  2. webscout/AIbase.py +247 -319
  3. webscout/AIutel.py +68 -703
  4. webscout/Bard.py +1072 -1026
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/GitToolkit/gitapi/__init__.py +20 -12
  7. webscout/Extra/GitToolkit/gitapi/gist.py +142 -0
  8. webscout/Extra/GitToolkit/gitapi/organization.py +91 -0
  9. webscout/Extra/GitToolkit/gitapi/repository.py +308 -195
  10. webscout/Extra/GitToolkit/gitapi/search.py +162 -0
  11. webscout/Extra/GitToolkit/gitapi/trending.py +236 -0
  12. webscout/Extra/GitToolkit/gitapi/user.py +128 -96
  13. webscout/Extra/GitToolkit/gitapi/utils.py +82 -62
  14. webscout/Extra/YTToolkit/README.md +443 -375
  15. webscout/Extra/YTToolkit/YTdownloader.py +953 -957
  16. webscout/Extra/YTToolkit/__init__.py +3 -3
  17. webscout/Extra/YTToolkit/transcriber.py +595 -476
  18. webscout/Extra/YTToolkit/ytapi/README.md +230 -44
  19. webscout/Extra/YTToolkit/ytapi/__init__.py +22 -6
  20. webscout/Extra/YTToolkit/ytapi/captions.py +190 -0
  21. webscout/Extra/YTToolkit/ytapi/channel.py +302 -307
  22. webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
  23. webscout/Extra/YTToolkit/ytapi/extras.py +178 -118
  24. webscout/Extra/YTToolkit/ytapi/hashtag.py +120 -0
  25. webscout/Extra/YTToolkit/ytapi/https.py +89 -88
  26. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
  27. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -59
  28. webscout/Extra/YTToolkit/ytapi/pool.py +8 -8
  29. webscout/Extra/YTToolkit/ytapi/query.py +143 -40
  30. webscout/Extra/YTToolkit/ytapi/shorts.py +122 -0
  31. webscout/Extra/YTToolkit/ytapi/stream.py +68 -63
  32. webscout/Extra/YTToolkit/ytapi/suggestions.py +97 -0
  33. webscout/Extra/YTToolkit/ytapi/utils.py +66 -62
  34. webscout/Extra/YTToolkit/ytapi/video.py +403 -232
  35. webscout/Extra/__init__.py +2 -3
  36. webscout/Extra/gguf.py +1298 -684
  37. webscout/Extra/tempmail/README.md +487 -487
  38. webscout/Extra/tempmail/__init__.py +28 -28
  39. webscout/Extra/tempmail/async_utils.py +143 -141
  40. webscout/Extra/tempmail/base.py +172 -161
  41. webscout/Extra/tempmail/cli.py +191 -187
  42. webscout/Extra/tempmail/emailnator.py +88 -84
  43. webscout/Extra/tempmail/mail_tm.py +378 -361
  44. webscout/Extra/tempmail/temp_mail_io.py +304 -292
  45. webscout/Extra/weather.py +196 -194
  46. webscout/Extra/weather_ascii.py +17 -15
  47. webscout/Provider/AISEARCH/PERPLEXED_search.py +175 -0
  48. webscout/Provider/AISEARCH/Perplexity.py +292 -333
  49. webscout/Provider/AISEARCH/README.md +106 -279
  50. webscout/Provider/AISEARCH/__init__.py +16 -9
  51. webscout/Provider/AISEARCH/brave_search.py +298 -0
  52. webscout/Provider/AISEARCH/iask_search.py +357 -410
  53. webscout/Provider/AISEARCH/monica_search.py +200 -220
  54. webscout/Provider/AISEARCH/webpilotai_search.py +242 -255
  55. webscout/Provider/Algion.py +413 -0
  56. webscout/Provider/Andi.py +74 -69
  57. webscout/Provider/Apriel.py +313 -0
  58. webscout/Provider/Ayle.py +323 -0
  59. webscout/Provider/ChatSandbox.py +329 -342
  60. webscout/Provider/ClaudeOnline.py +365 -0
  61. webscout/Provider/Cohere.py +232 -208
  62. webscout/Provider/DeepAI.py +367 -0
  63. webscout/Provider/Deepinfra.py +467 -340
  64. webscout/Provider/EssentialAI.py +217 -0
  65. webscout/Provider/ExaAI.py +274 -261
  66. webscout/Provider/Gemini.py +175 -169
  67. webscout/Provider/GithubChat.py +385 -369
  68. webscout/Provider/Gradient.py +286 -0
  69. webscout/Provider/Groq.py +556 -801
  70. webscout/Provider/HadadXYZ.py +323 -0
  71. webscout/Provider/HeckAI.py +392 -375
  72. webscout/Provider/HuggingFace.py +387 -0
  73. webscout/Provider/IBM.py +340 -0
  74. webscout/Provider/Jadve.py +317 -291
  75. webscout/Provider/K2Think.py +306 -0
  76. webscout/Provider/Koboldai.py +221 -384
  77. webscout/Provider/Netwrck.py +273 -270
  78. webscout/Provider/Nvidia.py +310 -0
  79. webscout/Provider/OPENAI/DeepAI.py +489 -0
  80. webscout/Provider/OPENAI/K2Think.py +423 -0
  81. webscout/Provider/OPENAI/PI.py +463 -0
  82. webscout/Provider/OPENAI/README.md +890 -952
  83. webscout/Provider/OPENAI/TogetherAI.py +405 -0
  84. webscout/Provider/OPENAI/TwoAI.py +255 -357
  85. webscout/Provider/OPENAI/__init__.py +148 -40
  86. webscout/Provider/OPENAI/ai4chat.py +348 -293
  87. webscout/Provider/OPENAI/akashgpt.py +436 -0
  88. webscout/Provider/OPENAI/algion.py +303 -0
  89. webscout/Provider/OPENAI/{exachat.py → ayle.py} +365 -444
  90. webscout/Provider/OPENAI/base.py +253 -249
  91. webscout/Provider/OPENAI/cerebras.py +296 -0
  92. webscout/Provider/OPENAI/chatgpt.py +870 -556
  93. webscout/Provider/OPENAI/chatsandbox.py +233 -173
  94. webscout/Provider/OPENAI/deepinfra.py +403 -322
  95. webscout/Provider/OPENAI/e2b.py +2370 -1414
  96. webscout/Provider/OPENAI/elmo.py +278 -0
  97. webscout/Provider/OPENAI/exaai.py +452 -417
  98. webscout/Provider/OPENAI/freeassist.py +446 -0
  99. webscout/Provider/OPENAI/gradient.py +448 -0
  100. webscout/Provider/OPENAI/groq.py +380 -364
  101. webscout/Provider/OPENAI/hadadxyz.py +292 -0
  102. webscout/Provider/OPENAI/heckai.py +333 -308
  103. webscout/Provider/OPENAI/huggingface.py +321 -0
  104. webscout/Provider/OPENAI/ibm.py +425 -0
  105. webscout/Provider/OPENAI/llmchat.py +253 -0
  106. webscout/Provider/OPENAI/llmchatco.py +378 -335
  107. webscout/Provider/OPENAI/meta.py +541 -0
  108. webscout/Provider/OPENAI/netwrck.py +374 -357
  109. webscout/Provider/OPENAI/nvidia.py +317 -0
  110. webscout/Provider/OPENAI/oivscode.py +348 -287
  111. webscout/Provider/OPENAI/openrouter.py +328 -0
  112. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  113. webscout/Provider/OPENAI/sambanova.py +397 -0
  114. webscout/Provider/OPENAI/sonus.py +305 -304
  115. webscout/Provider/OPENAI/textpollinations.py +370 -339
  116. webscout/Provider/OPENAI/toolbaz.py +375 -413
  117. webscout/Provider/OPENAI/typefully.py +419 -355
  118. webscout/Provider/OPENAI/typliai.py +279 -0
  119. webscout/Provider/OPENAI/utils.py +314 -318
  120. webscout/Provider/OPENAI/wisecat.py +359 -387
  121. webscout/Provider/OPENAI/writecream.py +185 -163
  122. webscout/Provider/OPENAI/x0gpt.py +462 -365
  123. webscout/Provider/OPENAI/zenmux.py +380 -0
  124. webscout/Provider/OpenRouter.py +386 -0
  125. webscout/Provider/Openai.py +337 -496
  126. webscout/Provider/PI.py +443 -429
  127. webscout/Provider/QwenLM.py +346 -254
  128. webscout/Provider/STT/__init__.py +28 -0
  129. webscout/Provider/STT/base.py +303 -0
  130. webscout/Provider/STT/elevenlabs.py +264 -0
  131. webscout/Provider/Sambanova.py +317 -0
  132. webscout/Provider/TTI/README.md +69 -82
  133. webscout/Provider/TTI/__init__.py +37 -7
  134. webscout/Provider/TTI/base.py +147 -64
  135. webscout/Provider/TTI/claudeonline.py +393 -0
  136. webscout/Provider/TTI/magicstudio.py +292 -201
  137. webscout/Provider/TTI/miragic.py +180 -0
  138. webscout/Provider/TTI/pollinations.py +331 -221
  139. webscout/Provider/TTI/together.py +334 -0
  140. webscout/Provider/TTI/utils.py +14 -11
  141. webscout/Provider/TTS/README.md +186 -192
  142. webscout/Provider/TTS/__init__.py +43 -10
  143. webscout/Provider/TTS/base.py +523 -159
  144. webscout/Provider/TTS/deepgram.py +286 -156
  145. webscout/Provider/TTS/elevenlabs.py +189 -111
  146. webscout/Provider/TTS/freetts.py +218 -0
  147. webscout/Provider/TTS/murfai.py +288 -113
  148. webscout/Provider/TTS/openai_fm.py +364 -129
  149. webscout/Provider/TTS/parler.py +203 -111
  150. webscout/Provider/TTS/qwen.py +334 -0
  151. webscout/Provider/TTS/sherpa.py +286 -0
  152. webscout/Provider/TTS/speechma.py +693 -580
  153. webscout/Provider/TTS/streamElements.py +275 -333
  154. webscout/Provider/TTS/utils.py +280 -280
  155. webscout/Provider/TextPollinationsAI.py +331 -308
  156. webscout/Provider/TogetherAI.py +450 -0
  157. webscout/Provider/TwoAI.py +309 -475
  158. webscout/Provider/TypliAI.py +311 -305
  159. webscout/Provider/UNFINISHED/ChatHub.py +219 -209
  160. webscout/Provider/{OPENAI/glider.py → UNFINISHED/ChutesAI.py} +331 -326
  161. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +300 -295
  162. webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +218 -198
  163. webscout/Provider/UNFINISHED/Qodo.py +481 -0
  164. webscout/Provider/{MCPCore.py → UNFINISHED/XenAI.py} +330 -315
  165. webscout/Provider/UNFINISHED/Youchat.py +347 -330
  166. webscout/Provider/UNFINISHED/aihumanizer.py +41 -0
  167. webscout/Provider/UNFINISHED/grammerchecker.py +37 -0
  168. webscout/Provider/UNFINISHED/liner.py +342 -0
  169. webscout/Provider/UNFINISHED/liner_api_request.py +246 -263
  170. webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +231 -224
  171. webscout/Provider/WiseCat.py +256 -233
  172. webscout/Provider/WrDoChat.py +390 -370
  173. webscout/Provider/__init__.py +115 -174
  174. webscout/Provider/ai4chat.py +181 -174
  175. webscout/Provider/akashgpt.py +330 -335
  176. webscout/Provider/cerebras.py +397 -290
  177. webscout/Provider/cleeai.py +236 -213
  178. webscout/Provider/elmo.py +291 -283
  179. webscout/Provider/geminiapi.py +343 -208
  180. webscout/Provider/julius.py +245 -223
  181. webscout/Provider/learnfastai.py +333 -325
  182. webscout/Provider/llama3mitril.py +230 -215
  183. webscout/Provider/llmchat.py +308 -258
  184. webscout/Provider/llmchatco.py +321 -306
  185. webscout/Provider/meta.py +996 -801
  186. webscout/Provider/oivscode.py +332 -309
  187. webscout/Provider/searchchat.py +316 -292
  188. webscout/Provider/sonus.py +264 -258
  189. webscout/Provider/toolbaz.py +359 -353
  190. webscout/Provider/turboseek.py +332 -266
  191. webscout/Provider/typefully.py +262 -202
  192. webscout/Provider/x0gpt.py +332 -299
  193. webscout/__init__.py +31 -39
  194. webscout/__main__.py +5 -5
  195. webscout/cli.py +585 -524
  196. webscout/client.py +1497 -70
  197. webscout/conversation.py +140 -436
  198. webscout/exceptions.py +383 -362
  199. webscout/litagent/__init__.py +29 -29
  200. webscout/litagent/agent.py +492 -455
  201. webscout/litagent/constants.py +60 -60
  202. webscout/models.py +505 -181
  203. webscout/optimizers.py +74 -420
  204. webscout/prompt_manager.py +376 -288
  205. webscout/sanitize.py +1514 -0
  206. webscout/scout/README.md +452 -404
  207. webscout/scout/__init__.py +8 -8
  208. webscout/scout/core/__init__.py +7 -7
  209. webscout/scout/core/crawler.py +330 -210
  210. webscout/scout/core/scout.py +800 -607
  211. webscout/scout/core/search_result.py +51 -96
  212. webscout/scout/core/text_analyzer.py +64 -63
  213. webscout/scout/core/text_utils.py +412 -277
  214. webscout/scout/core/web_analyzer.py +54 -52
  215. webscout/scout/element.py +872 -478
  216. webscout/scout/parsers/__init__.py +70 -69
  217. webscout/scout/parsers/html5lib_parser.py +182 -172
  218. webscout/scout/parsers/html_parser.py +238 -236
  219. webscout/scout/parsers/lxml_parser.py +203 -178
  220. webscout/scout/utils.py +38 -37
  221. webscout/search/__init__.py +47 -0
  222. webscout/search/base.py +201 -0
  223. webscout/search/bing_main.py +45 -0
  224. webscout/search/brave_main.py +92 -0
  225. webscout/search/duckduckgo_main.py +57 -0
  226. webscout/search/engines/__init__.py +127 -0
  227. webscout/search/engines/bing/__init__.py +15 -0
  228. webscout/search/engines/bing/base.py +35 -0
  229. webscout/search/engines/bing/images.py +114 -0
  230. webscout/search/engines/bing/news.py +96 -0
  231. webscout/search/engines/bing/suggestions.py +36 -0
  232. webscout/search/engines/bing/text.py +109 -0
  233. webscout/search/engines/brave/__init__.py +19 -0
  234. webscout/search/engines/brave/base.py +47 -0
  235. webscout/search/engines/brave/images.py +213 -0
  236. webscout/search/engines/brave/news.py +353 -0
  237. webscout/search/engines/brave/suggestions.py +318 -0
  238. webscout/search/engines/brave/text.py +167 -0
  239. webscout/search/engines/brave/videos.py +364 -0
  240. webscout/search/engines/duckduckgo/__init__.py +25 -0
  241. webscout/search/engines/duckduckgo/answers.py +80 -0
  242. webscout/search/engines/duckduckgo/base.py +189 -0
  243. webscout/search/engines/duckduckgo/images.py +100 -0
  244. webscout/search/engines/duckduckgo/maps.py +183 -0
  245. webscout/search/engines/duckduckgo/news.py +70 -0
  246. webscout/search/engines/duckduckgo/suggestions.py +22 -0
  247. webscout/search/engines/duckduckgo/text.py +221 -0
  248. webscout/search/engines/duckduckgo/translate.py +48 -0
  249. webscout/search/engines/duckduckgo/videos.py +80 -0
  250. webscout/search/engines/duckduckgo/weather.py +84 -0
  251. webscout/search/engines/mojeek.py +61 -0
  252. webscout/search/engines/wikipedia.py +77 -0
  253. webscout/search/engines/yahoo/__init__.py +41 -0
  254. webscout/search/engines/yahoo/answers.py +19 -0
  255. webscout/search/engines/yahoo/base.py +34 -0
  256. webscout/search/engines/yahoo/images.py +323 -0
  257. webscout/search/engines/yahoo/maps.py +19 -0
  258. webscout/search/engines/yahoo/news.py +258 -0
  259. webscout/search/engines/yahoo/suggestions.py +140 -0
  260. webscout/search/engines/yahoo/text.py +273 -0
  261. webscout/search/engines/yahoo/translate.py +19 -0
  262. webscout/search/engines/yahoo/videos.py +302 -0
  263. webscout/search/engines/yahoo/weather.py +220 -0
  264. webscout/search/engines/yandex.py +67 -0
  265. webscout/search/engines/yep/__init__.py +13 -0
  266. webscout/search/engines/yep/base.py +34 -0
  267. webscout/search/engines/yep/images.py +101 -0
  268. webscout/search/engines/yep/suggestions.py +38 -0
  269. webscout/search/engines/yep/text.py +99 -0
  270. webscout/search/http_client.py +172 -0
  271. webscout/search/results.py +141 -0
  272. webscout/search/yahoo_main.py +57 -0
  273. webscout/search/yep_main.py +48 -0
  274. webscout/server/__init__.py +48 -0
  275. webscout/server/config.py +78 -0
  276. webscout/server/exceptions.py +69 -0
  277. webscout/server/providers.py +286 -0
  278. webscout/server/request_models.py +131 -0
  279. webscout/server/request_processing.py +404 -0
  280. webscout/server/routes.py +642 -0
  281. webscout/server/server.py +351 -0
  282. webscout/server/ui_templates.py +1171 -0
  283. webscout/swiftcli/__init__.py +79 -95
  284. webscout/swiftcli/core/__init__.py +7 -7
  285. webscout/swiftcli/core/cli.py +574 -297
  286. webscout/swiftcli/core/context.py +98 -104
  287. webscout/swiftcli/core/group.py +268 -241
  288. webscout/swiftcli/decorators/__init__.py +28 -28
  289. webscout/swiftcli/decorators/command.py +243 -221
  290. webscout/swiftcli/decorators/options.py +247 -220
  291. webscout/swiftcli/decorators/output.py +392 -252
  292. webscout/swiftcli/exceptions.py +21 -21
  293. webscout/swiftcli/plugins/__init__.py +9 -9
  294. webscout/swiftcli/plugins/base.py +134 -135
  295. webscout/swiftcli/plugins/manager.py +269 -269
  296. webscout/swiftcli/utils/__init__.py +58 -59
  297. webscout/swiftcli/utils/formatting.py +251 -252
  298. webscout/swiftcli/utils/parsing.py +368 -267
  299. webscout/update_checker.py +280 -136
  300. webscout/utils.py +28 -14
  301. webscout/version.py +2 -1
  302. webscout/version.py.bak +3 -0
  303. webscout/zeroart/__init__.py +218 -135
  304. webscout/zeroart/base.py +70 -66
  305. webscout/zeroart/effects.py +155 -101
  306. webscout/zeroart/fonts.py +1799 -1239
  307. webscout-2026.1.19.dist-info/METADATA +638 -0
  308. webscout-2026.1.19.dist-info/RECORD +312 -0
  309. {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/WHEEL +1 -1
  310. {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/entry_points.txt +1 -1
  311. webscout/DWEBS.py +0 -520
  312. webscout/Extra/Act.md +0 -309
  313. webscout/Extra/GitToolkit/gitapi/README.md +0 -110
  314. webscout/Extra/autocoder/__init__.py +0 -9
  315. webscout/Extra/autocoder/autocoder.py +0 -1105
  316. webscout/Extra/autocoder/autocoder_utiles.py +0 -332
  317. webscout/Extra/gguf.md +0 -430
  318. webscout/Extra/weather.md +0 -281
  319. webscout/Litlogger/README.md +0 -10
  320. webscout/Litlogger/__init__.py +0 -15
  321. webscout/Litlogger/formats.py +0 -4
  322. webscout/Litlogger/handlers.py +0 -103
  323. webscout/Litlogger/levels.py +0 -13
  324. webscout/Litlogger/logger.py +0 -92
  325. webscout/Provider/AI21.py +0 -177
  326. webscout/Provider/AISEARCH/DeepFind.py +0 -254
  327. webscout/Provider/AISEARCH/felo_search.py +0 -202
  328. webscout/Provider/AISEARCH/genspark_search.py +0 -324
  329. webscout/Provider/AISEARCH/hika_search.py +0 -186
  330. webscout/Provider/AISEARCH/scira_search.py +0 -298
  331. webscout/Provider/Aitopia.py +0 -316
  332. webscout/Provider/AllenAI.py +0 -440
  333. webscout/Provider/Blackboxai.py +0 -791
  334. webscout/Provider/ChatGPTClone.py +0 -237
  335. webscout/Provider/ChatGPTGratis.py +0 -194
  336. webscout/Provider/Cloudflare.py +0 -324
  337. webscout/Provider/ExaChat.py +0 -358
  338. webscout/Provider/Flowith.py +0 -217
  339. webscout/Provider/FreeGemini.py +0 -250
  340. webscout/Provider/Glider.py +0 -225
  341. webscout/Provider/HF_space/__init__.py +0 -0
  342. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  343. webscout/Provider/HuggingFaceChat.py +0 -469
  344. webscout/Provider/Hunyuan.py +0 -283
  345. webscout/Provider/LambdaChat.py +0 -411
  346. webscout/Provider/Llama3.py +0 -259
  347. webscout/Provider/Nemotron.py +0 -218
  348. webscout/Provider/OLLAMA.py +0 -396
  349. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -766
  350. webscout/Provider/OPENAI/Cloudflare.py +0 -378
  351. webscout/Provider/OPENAI/FreeGemini.py +0 -283
  352. webscout/Provider/OPENAI/NEMOTRON.py +0 -232
  353. webscout/Provider/OPENAI/Qwen3.py +0 -283
  354. webscout/Provider/OPENAI/api.py +0 -969
  355. webscout/Provider/OPENAI/c4ai.py +0 -373
  356. webscout/Provider/OPENAI/chatgptclone.py +0 -494
  357. webscout/Provider/OPENAI/copilot.py +0 -242
  358. webscout/Provider/OPENAI/flowith.py +0 -162
  359. webscout/Provider/OPENAI/freeaichat.py +0 -359
  360. webscout/Provider/OPENAI/mcpcore.py +0 -389
  361. webscout/Provider/OPENAI/multichat.py +0 -376
  362. webscout/Provider/OPENAI/opkfc.py +0 -496
  363. webscout/Provider/OPENAI/scirachat.py +0 -477
  364. webscout/Provider/OPENAI/standardinput.py +0 -433
  365. webscout/Provider/OPENAI/typegpt.py +0 -364
  366. webscout/Provider/OPENAI/uncovrAI.py +0 -463
  367. webscout/Provider/OPENAI/venice.py +0 -431
  368. webscout/Provider/OPENAI/yep.py +0 -382
  369. webscout/Provider/OpenGPT.py +0 -209
  370. webscout/Provider/Perplexitylabs.py +0 -415
  371. webscout/Provider/Reka.py +0 -214
  372. webscout/Provider/StandardInput.py +0 -290
  373. webscout/Provider/TTI/aiarta.py +0 -365
  374. webscout/Provider/TTI/artbit.py +0 -0
  375. webscout/Provider/TTI/fastflux.py +0 -200
  376. webscout/Provider/TTI/piclumen.py +0 -203
  377. webscout/Provider/TTI/pixelmuse.py +0 -225
  378. webscout/Provider/TTS/gesserit.py +0 -128
  379. webscout/Provider/TTS/sthir.py +0 -94
  380. webscout/Provider/TeachAnything.py +0 -229
  381. webscout/Provider/UNFINISHED/puterjs.py +0 -635
  382. webscout/Provider/UNFINISHED/test_lmarena.py +0 -119
  383. webscout/Provider/Venice.py +0 -258
  384. webscout/Provider/VercelAI.py +0 -253
  385. webscout/Provider/Writecream.py +0 -246
  386. webscout/Provider/WritingMate.py +0 -269
  387. webscout/Provider/asksteve.py +0 -220
  388. webscout/Provider/chatglm.py +0 -215
  389. webscout/Provider/copilot.py +0 -425
  390. webscout/Provider/freeaichat.py +0 -285
  391. webscout/Provider/granite.py +0 -235
  392. webscout/Provider/hermes.py +0 -266
  393. webscout/Provider/koala.py +0 -170
  394. webscout/Provider/lmarena.py +0 -198
  395. webscout/Provider/multichat.py +0 -364
  396. webscout/Provider/scira_chat.py +0 -299
  397. webscout/Provider/scnet.py +0 -243
  398. webscout/Provider/talkai.py +0 -194
  399. webscout/Provider/typegpt.py +0 -289
  400. webscout/Provider/uncovr.py +0 -368
  401. webscout/Provider/yep.py +0 -389
  402. webscout/litagent/Readme.md +0 -276
  403. webscout/litprinter/__init__.py +0 -59
  404. webscout/swiftcli/Readme.md +0 -323
  405. webscout/tempid.py +0 -128
  406. webscout/webscout_search.py +0 -1184
  407. webscout/webscout_search_async.py +0 -654
  408. webscout/yep_search.py +0 -347
  409. webscout/zeroart/README.md +0 -89
  410. webscout-8.2.9.dist-info/METADATA +0 -1033
  411. webscout-8.2.9.dist-info/RECORD +0 -289
  412. {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/licenses/LICENSE.md +0 -0
  413. {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/llmchatco.py
@@ -1,335 +1,378 @@
- import time
- import uuid
- import requests
- import json
- from typing import List, Dict, Optional, Union, Generator, Any
-
- # Import base classes and utility structures
- from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
- from .utils import (
-     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-     ChatCompletionMessage, CompletionUsage, get_last_user_message, get_system_prompt, format_prompt # Import format_prompt
- )
-
- # Attempt to import LitAgent, fallback if not available
- try:
-     from webscout.litagent import LitAgent
- except ImportError:
-     # Define a dummy LitAgent if webscout is not installed or accessible
-     class LitAgent:
-         def random(self) -> str:
-             return "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
-
- # --- LLMChatCo Client ---
-
- class Completions(BaseCompletions):
-     def __init__(self, client: 'LLMChatCo'):
-         self._client = client
-
-     def create(
-         self,
-         *,
-         model: str, # Model is now mandatory per request
-         messages: List[Dict[str, str]],
-         max_tokens: Optional[int] = 2048, # Note: LLMChatCo doesn't seem to use max_tokens directly in payload
-         stream: bool = False,
-         temperature: Optional[float] = None, # Note: LLMChatCo doesn't seem to use temperature directly in payload
-         top_p: Optional[float] = None, # Note: LLMChatCo doesn't seem to use top_p directly in payload
-         web_search: bool = False, # LLMChatCo specific parameter
-         system_prompt: Optional[str] = "You are a helpful assistant.", # Default system prompt if not provided
-         **kwargs: Any
-     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-         """
-         Creates a model response for the given chat conversation.
-         Mimics openai.chat.completions.create
-         """
-         if model not in self._client.AVAILABLE_MODELS:
-             # Raise error as model is mandatory and must be valid for this provider
-             raise ValueError(f"Model '{model}' not supported by LLMChatCo. Available: {self._client.AVAILABLE_MODELS}")
-         actual_model = model
-
-         # Determine the effective system prompt
-         effective_system_prompt = system_prompt # Use the provided system_prompt or its default
-         message_list_system_prompt = get_system_prompt(messages)
-         # If a system prompt is also in messages, the explicit one takes precedence.
-         # We'll use the effective_system_prompt determined above.
-
-         # Prepare final messages list, ensuring only one system message at the start
-         final_messages = []
-         if effective_system_prompt:
-             final_messages.append({"role": "system", "content": effective_system_prompt})
-         final_messages.extend([msg for msg in messages if msg.get("role") != "system"])
-
-         # Extract the last user prompt using the utility function for the separate 'prompt' field
-         last_user_prompt = get_last_user_message(final_messages)
-
-         # Note: format_prompt is not directly used here as the API requires the structured 'messages' list
-         # and a separate 'prompt' field, rather than a single formatted string.
-
-         # Generate a unique ID for this message
-         thread_item_id = ''.join(str(uuid.uuid4()).split('-'))[:20]
-
-         payload = {
-             "mode": actual_model,
-             "prompt": last_user_prompt, # LLMChatCo seems to require the last prompt separately
-             "threadId": self._client.thread_id,
-             "messages": final_messages, # Use the reconstructed final_messages list
-             "mcpConfig": {}, # Keep structure as observed
-             "threadItemId": thread_item_id,
-             "parentThreadItemId": "", # Assuming no parent for simplicity
-             "webSearch": web_search,
-             "showSuggestions": True # Keep structure as observed
-         }
-
-         # Add any extra kwargs to the payload if needed, though LLMChatCo seems limited
-         payload.update(kwargs)
-
-         request_id = f"chatcmpl-{uuid.uuid4()}"
-         created_time = int(time.time())
-
-         if stream:
-             return self._create_stream(request_id, created_time, actual_model, payload)
-         else:
-             return self._create_non_stream(request_id, created_time, actual_model, payload)
-
-     def _create_stream(
-         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-     ) -> Generator[ChatCompletionChunk, None, None]:
-         try:
-             response = self._client.session.post(
-                 self._client.api_endpoint,
-                 headers=self._client.headers,
-                 json=payload,
-                 stream=True,
-                 timeout=self._client.timeout
-             )
-
-             if not response.ok:
-                 raise IOError(
-                     f"LLMChatCo API Error: {response.status_code} {response.reason} - {response.text}"
-                 )
-
-             full_response_text = ""
-             current_event = None
-             buffer = ""
-
-             for chunk_bytes in response.iter_content(chunk_size=None, decode_unicode=False):
-                 if not chunk_bytes:
-                     continue
-
-                 buffer += chunk_bytes.decode('utf-8', errors='replace')
-
-                 while '\n' in buffer:
-                     line, buffer = buffer.split('\n', 1)
-                     line = line.strip()
-
-                     if not line: # End of an event block
-                         current_event = None
-                         continue
-
-                     if line.startswith('event:'):
-                         current_event = line[len('event:'):].strip()
-                     elif line.startswith('data:'):
-                         data_content = line[len('data:'):].strip()
-                         if data_content and current_event == 'answer':
-                             try:
-                                 json_data = json.loads(data_content)
-                                 answer_data = json_data.get("answer", {})
-                                 text_chunk = answer_data.get("text", "")
-                                 full_text = answer_data.get("fullText")
-                                 status = answer_data.get("status")
-
-                                 # Prefer fullText if available and status is COMPLETED
-                                 if full_text is not None and status == "COMPLETED":
-                                     delta_content = full_text[len(full_response_text):]
-                                     full_response_text = full_text # Update full response tracker
-                                 elif text_chunk is not None:
-                                     # Calculate delta based on potentially partial 'text' field
-                                     delta_content = text_chunk[len(full_response_text):]
-                                     full_response_text = text_chunk # Update full response tracker
-                                 else:
-                                     delta_content = None
-
-                                 if delta_content:
-                                     delta = ChoiceDelta(content=delta_content, role="assistant")
-                                     choice = Choice(index=0, delta=delta, finish_reason=None)
-                                     chunk = ChatCompletionChunk(
-                                         id=request_id,
-                                         choices=[choice],
-                                         created=created_time,
-                                         model=model,
-                                     )
-                                     yield chunk
-
-                             except json.JSONDecodeError:
-                                 print(f"Warning: Could not decode JSON data line: {data_content}")
-                                 continue
-                         elif data_content and current_event == 'done':
-                             # The 'done' event signals the end of the stream
-                             delta = ChoiceDelta() # Empty delta
-                             choice = Choice(index=0, delta=delta, finish_reason="stop")
-                             chunk = ChatCompletionChunk(
-                                 id=request_id,
-                                 choices=[choice],
-                                 created=created_time,
-                                 model=model,
-                             )
-                             yield chunk
-                             return # End the generator
-
-         except requests.exceptions.RequestException as e:
-             print(f"Error during LLMChatCo stream request: {e}")
-             raise IOError(f"LLMChatCo request failed: {e}") from e
-         except Exception as e:
-             print(f"Unexpected error during LLMChatCo stream: {e}")
-             raise IOError(f"LLMChatCo stream processing failed: {e}") from e
-
-         # Fallback final chunk if 'done' event wasn't received properly
-         delta = ChoiceDelta()
-         choice = Choice(index=0, delta=delta, finish_reason="stop")
-         chunk = ChatCompletionChunk(
-             id=request_id,
-             choices=[choice],
-             created=created_time,
-             model=model,
-         )
-         yield chunk
-
-
-     def _create_non_stream(
-         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-     ) -> ChatCompletion:
-         # Non-streaming requires accumulating stream chunks
-         full_response_content = ""
-         finish_reason = "stop" # Assume stop unless error occurs
-
-         try:
-             stream_generator = self._create_stream(request_id, created_time, model, payload)
-             for chunk in stream_generator:
-                 if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
-                     full_response_content += chunk.choices[0].delta.content
-                 if chunk.choices and chunk.choices[0].finish_reason:
-                     finish_reason = chunk.choices[0].finish_reason
-
-         except IOError as e:
-             print(f"Error obtaining non-stream response from LLMChatCo: {e}")
-             # Return a partial or error response if needed, or re-raise
-             # For simplicity, we'll return what we have, potentially empty
-             finish_reason = "error" # Indicate an issue
-
-         # Construct the final ChatCompletion object
-         message = ChatCompletionMessage(
-             role="assistant",
-             content=full_response_content
-         )
-         choice = Choice(
-             index=0,
-             message=message,
-             finish_reason=finish_reason
-         )
-         # Usage data is not provided by this API, so set to 0
-         usage = CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0)
-
-         completion = ChatCompletion(
-             id=request_id,
-             choices=[choice],
-             created=created_time,
-             model=model,
-             usage=usage,
-         )
-         return completion
-
- class Chat(BaseChat):
-     def __init__(self, client: 'LLMChatCo'):
-         self.completions = Completions(client)
-
- class LLMChatCo(OpenAICompatibleProvider):
-     """
-     OpenAI-compatible client for LLMChat.co API.
-
-     Usage:
-         client = LLMChatCo()
-         response = client.chat.completions.create(
-             model="gemini-flash-2.0", # Model must be specified here
-             messages=[{"role": "user", "content": "Hello!"}]
-         )
-         print(response.choices[0].message.content)
-     """
-     AVAILABLE_MODELS = [
-         "gemini-flash-2.0", # Default model
-         "llama-4-scout",
-         "gpt-4o-mini",
-         # "gpt-4.1",
-         # "gpt-4.1-mini",
-         "gpt-4.1-nano",
-     ]
-
-     def __init__(
-         self,
-         timeout: int = 60,
-         browser: str = "chrome" # For User-Agent generation
-     ):
-         """
-         Initialize the LLMChatCo client.
-
-         Args:
-             timeout: Request timeout in seconds.
-             browser: Browser name for LitAgent to generate User-Agent.
-         """
-         # Removed model, system_prompt, proxies parameters
-
-         self.timeout = timeout
-         # Removed self.system_prompt assignment
-         self.api_endpoint = "https://llmchat.co/api/completion"
-         self.session = requests.Session()
-         self.thread_id = str(uuid.uuid4()) # Unique thread ID per client instance
-
-         # Removed proxy handling block
-
-         # Initialize LitAgent for user agent generation and fingerprinting
-         try:
-             agent = LitAgent()
-             fingerprint = agent.generate_fingerprint(browser=browser)
-         except Exception as e:
-             print(f"Warning: Failed to generate fingerprint with LitAgent: {e}. Using fallback.")
-             # Fallback fingerprint data
-             fingerprint = {
-                 "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
-                 "accept_language": "en-US,en;q=0.9",
-                 "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
-                 "platform": "Windows",
-                 "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
-             }
-
-         # Initialize headers using the fingerprint
-         self.headers = {
-             "Accept": fingerprint["accept"],
-             "Accept-Encoding": "gzip, deflate, br, zstd", # Standard encoding
-             "Accept-Language": fingerprint["accept_language"],
-             "Content-Type": "application/json",
-             "Cache-Control": "no-cache",
-             "Connection": "keep-alive",
-             "Origin": "https://llmchat.co", # Specific origin for LLMChatCo
-             "Pragma": "no-cache",
-             "Referer": f"https://llmchat.co/chat/{self.thread_id}", # Specific referer for LLMChatCo
-             "Sec-Fetch-Dest": "empty",
-             "Sec-Fetch-Mode": "cors",
-             "Sec-Fetch-Site": "same-origin",
-             "Sec-CH-UA": fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"', # Fallback if empty
-             "Sec-CH-UA-Mobile": "?0",
-             "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
-             "User-Agent": fingerprint["user_agent"],
-             "DNT": "1", # Added back from previous version
-         }
-         self.session.headers.update(self.headers)
-
-         # Initialize the chat interface
-         self.chat = Chat(self)
-
-     @property
-     def models(self):
-         class _ModelList:
-             def list(inner_self):
-                 return type(self).AVAILABLE_MODELS
-         return _ModelList()
-
+ import json
+ import time
+ import uuid
+ from typing import Any, Dict, Generator, List, Optional, Union, cast
+
+ import requests
+
+ # Import base classes and utility structures
+ from webscout.Provider.OPENAI.base import (
+     BaseChat,
+     BaseCompletions,
+     OpenAICompatibleProvider,
+     SimpleModelList,
+ )
+ from webscout.Provider.OPENAI.utils import (
+     ChatCompletion,
+     ChatCompletionChunk,
+     ChatCompletionMessage,  # Import format_prompt
+     Choice,
+     ChoiceDelta,
+     CompletionUsage,
+     get_last_user_message,
+     get_system_prompt,
+ )
+
+ # Attempt to import LitAgent, fallback if not available
+ from ...litagent import LitAgent
+
+ # --- LLMChatCo Client ---
+
+
+ class Completions(BaseCompletions):
+     def __init__(self, client: "LLMChatCo"):
+         self._client = client
+
+     def create(
+         self,
+         *,
+         model: str,  # Model is now mandatory per request
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[
+             int
+         ] = 2048,  # Note: LLMChatCo doesn't seem to use max_tokens directly in payload
+         stream: bool = False,
+         temperature: Optional[
+             float
+         ] = None,  # Note: LLMChatCo doesn't seem to use temperature directly in payload
+         top_p: Optional[
+             float
+         ] = None,  # Note: LLMChatCo doesn't seem to use top_p directly in payload
+         web_search: bool = False,  # LLMChatCo specific parameter
+         system_prompt: Optional[
+             str
+         ] = "You are a helpful assistant.",  # Default system prompt if not provided
+         timeout: Optional[int] = None,
+         proxies: Optional[Dict[str, str]] = None,
+         **kwargs: Any,
+     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+         """
+         Creates a model response for the given chat conversation.
+         Mimics openai.chat.completions.create
+         """
+         if model not in self._client.AVAILABLE_MODELS:
+             # Raise error as model is mandatory and must be valid for this provider
+             raise ValueError(
+                 f"Model '{model}' not supported by LLMChatCo. Available: {self._client.AVAILABLE_MODELS}"
+             )
+         actual_model = model
+
+         # Determine the effective system prompt
+         effective_system_prompt = system_prompt  # Use the provided system_prompt or its default
+         get_system_prompt(messages)
+         # If a system prompt is also in messages, the explicit one takes precedence.
+         # We'll use the effective_system_prompt determined above.
+
+         # Prepare final messages list, ensuring only one system message at the start
+         final_messages = []
+         if effective_system_prompt:
+             final_messages.append({"role": "system", "content": effective_system_prompt})
+         final_messages.extend([msg for msg in messages if msg.get("role") != "system"])
+
+         # Extract the last user prompt using the utility function for the separate 'prompt' field
+         last_user_prompt = get_last_user_message(final_messages)
+
+         # Note: format_prompt is not directly used here as the API requires the structured 'messages' list
+         # and a separate 'prompt' field, rather than a single formatted string.
+
+         # Generate a unique ID for this message
+         thread_item_id = "".join(str(uuid.uuid4()).split("-"))[:20]
+
+         payload = {
+             "mode": actual_model,
+             "prompt": last_user_prompt,  # LLMChatCo seems to require the last prompt separately
+             "threadId": self._client.thread_id,
+             "messages": final_messages,  # Use the reconstructed final_messages list
+             "mcpConfig": {},  # Keep structure as observed
+             "threadItemId": thread_item_id,
+             "parentThreadItemId": "",  # Assuming no parent for simplicity
+             "webSearch": web_search,
+             "showSuggestions": True,  # Keep structure as observed
+         }
+
+         # Add any extra kwargs to the payload if needed, though LLMChatCo seems limited
+         payload.update(kwargs)
+
+         request_id = f"chatcmpl-{uuid.uuid4()}"
+         created_time = int(time.time())
+
+         if stream:
+             return self._create_stream(
+                 request_id, created_time, actual_model, payload, timeout, proxies
+             )
+         else:
+             return self._create_non_stream(
+                 request_id, created_time, actual_model, payload, timeout, proxies
+             )
+
+     def _create_stream(
+         self,
+         request_id: str,
+         created_time: int,
+         model: str,
+         payload: Dict[str, Any],
+         timeout: Optional[int] = None,
+         proxies: Optional[Dict[str, str]] = None,
+     ) -> Generator[ChatCompletionChunk, None, None]:
+         try:
+             response = self._client.session.post(
+                 self._client.api_endpoint,
+                 headers=self._client.headers,
+                 json=payload,
+                 stream=True,
+                 timeout=timeout or self._client.timeout,
+                 proxies=proxies or getattr(self._client, "proxies", None),
+             )
+
+             if not response.ok:
+                 raise IOError(
+                     f"LLMChatCo API Error: {response.status_code} {response.reason} - {response.text}"
+                 )
+
+             full_response_text = ""
+             current_event = None
+             buffer = ""
+
+             for chunk_bytes in response.iter_content(chunk_size=None, decode_unicode=False):
+                 if not chunk_bytes:
+                     continue
+
+                 buffer += chunk_bytes.decode("utf-8", errors="replace")
+
+                 while "\n" in buffer:
+                     line, buffer = buffer.split("\n", 1)
+                     line = line.strip()
+
+                     if not line:  # End of an event block
+                         current_event = None
+                         continue
+
+                     if line.startswith("event:"):
+                         current_event = line[len("event:") :].strip()
+                     elif line.startswith("data:"):
+                         data_content = line[len("data:") :].strip()
+                         if data_content and current_event == "answer":
+                             try:
+                                 json_data = json.loads(data_content)
+                                 answer_data = json_data.get("answer", {})
+                                 text_chunk = answer_data.get("text", "")
+                                 full_text = answer_data.get("fullText")
+                                 status = answer_data.get("status")
+
+                                 # Prefer fullText if available and status is COMPLETED
+                                 if full_text is not None and status == "COMPLETED":
+                                     delta_content = full_text[len(full_response_text) :]
+                                     full_response_text = full_text  # Update full response tracker
+                                 elif text_chunk is not None:
+                                     # Calculate delta based on potentially partial 'text' field
+                                     delta_content = text_chunk[len(full_response_text) :]
+                                     full_response_text = text_chunk  # Update full response tracker
+                                 else:
+                                     delta_content = None
+
+                                 if delta_content:
+                                     delta = ChoiceDelta(content=delta_content, role="assistant")
+                                     choice = Choice(index=0, delta=delta, finish_reason=None)
+                                     chunk = ChatCompletionChunk(
+                                         id=request_id,
+                                         choices=[choice],
+                                         created=created_time,
+                                         model=model,
+                                     )
+                                     yield chunk
+
+                             except json.JSONDecodeError:
+                                 print(f"Warning: Could not decode JSON data line: {data_content}")
+                                 continue
+                         elif data_content and current_event == "done":
+                             # The 'done' event signals the end of the stream
+                             delta = ChoiceDelta()  # Empty delta
+                             choice = Choice(index=0, delta=delta, finish_reason="stop")
+                             chunk = ChatCompletionChunk(
+                                 id=request_id,
+                                 choices=[choice],
+                                 created=created_time,
+                                 model=model,
+                             )
+                             yield chunk
+                             return  # End the generator
+
+         except requests.exceptions.RequestException as e:
+             print(f"Error during LLMChatCo stream request: {e}")
+             raise IOError(f"LLMChatCo request failed: {e}") from e
+         except Exception as e:
+             print(f"Unexpected error during LLMChatCo stream: {e}")
+             raise IOError(f"LLMChatCo stream processing failed: {e}") from e
+
+         # Fallback final chunk if 'done' event wasn't received properly
+         delta = ChoiceDelta()
+         choice = Choice(index=0, delta=delta, finish_reason="stop")
+         chunk = ChatCompletionChunk(
+             id=request_id,
+             choices=[choice],
+             created=created_time,
+             model=model,
+         )
+         yield chunk
+
+     def _create_non_stream(
+         self,
+         request_id: str,
+         created_time: int,
+         model: str,
+         payload: Dict[str, Any],
+         timeout: Optional[int] = None,
+         proxies: Optional[Dict[str, str]] = None,
+     ) -> ChatCompletion:
+         # Non-streaming requires accumulating stream chunks
+         full_response_content = ""
+         finish_reason = "stop"  # Assume stop unless error occurs
+
+         try:
+             stream_generator = self._create_stream(
+                 request_id, created_time, model, payload, timeout, proxies
+             )
+             for chunk in stream_generator:
+                 if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
+                     full_response_content += chunk.choices[0].delta.content
+                 if chunk.choices and chunk.choices[0].finish_reason:
+                     finish_reason = chunk.choices[0].finish_reason
+
+         except IOError as e:
+             print(f"Error obtaining non-stream response from LLMChatCo: {e}")
+             # Return a partial or error response if needed, or re-raise
+             # For simplicity, we'll return what we have, potentially empty
+             finish_reason = "error"  # Indicate an issue
+
+         # Construct the final ChatCompletion object
+         message = ChatCompletionMessage(role="assistant", content=full_response_content)
+         choice = Choice(index=0, message=message, finish_reason=finish_reason)
+         # Usage data is not provided by this API, so set to 0
+         usage = CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0)
+
+         completion = ChatCompletion(
+             id=request_id,
+             choices=[choice],
+             created=created_time,
+             model=model,
+             usage=usage,
+         )
+         return completion
+
+
+ class Chat(BaseChat):
+     def __init__(self, client: "LLMChatCo"):
+         self.completions = Completions(client)
+
+
+ class LLMChatCo(OpenAICompatibleProvider):
+     """
+     OpenAI-compatible client for LLMChat.co API.
+
+     Usage:
+         client = LLMChatCo()
+         response = client.chat.completions.create(
+             model="gemini-flash-2.0", # Model must be specified here
+             messages=[{"role": "user", "content": "Hello!"}]
+         )
+         print(response.choices[0].message.content)
+     """
+
+     required_auth = False  # No API key required for LLMChatCo
+     AVAILABLE_MODELS = [
+         "gemini-flash-2.0",  # Default model
+         "llama-4-scout",
+         "gpt-4o-mini",
+         # "gpt-4.1",
+         # "gpt-4.1-mini",
+         "gpt-4.1-nano",
+     ]
+
+     def __init__(
+         self,
+         timeout: int = 60,
+         browser: str = "chrome",  # For User-Agent generation
+     ):
+         """
+         Initialize the LLMChatCo client.
+
+         Args:
+             timeout: Request timeout in seconds.
+             browser: Browser name for LitAgent to generate User-Agent.
+         """
+         # Removed model, system_prompt, proxies parameters
+
+         self.timeout = timeout
+         # Removed self.system_prompt assignment
+         self.api_endpoint = "https://llmchat.co/api/completion"
+         self.session = requests.Session()
+         self.thread_id = str(uuid.uuid4())  # Unique thread ID per client instance
+
+         # Removed proxy handling block
+
+         # Initialize LitAgent for user agent generation and fingerprinting
+         try:
+             agent = LitAgent()
+             fingerprint = agent.generate_fingerprint(browser=browser)
+         except Exception as e:
+             print(f"Warning: Failed to generate fingerprint with LitAgent: {e}. Using fallback.")
+             # Fallback fingerprint data
+             fingerprint = {
+                 "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
+                 "accept_language": "en-US,en;q=0.9",
+                 "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
+                 "platform": "Windows",
+                 "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
+             }
+
+         # Initialize headers using the fingerprint
+         self.headers = {
+             "Accept": fingerprint["accept"],
+             "Accept-Language": fingerprint["accept_language"],
+             "Content-Type": "application/json",
+             "Cache-Control": "no-cache",
+             "Connection": "keep-alive",
+             "Origin": "https://llmchat.co",  # Specific origin for LLMChatCo
+             "Pragma": "no-cache",
+             "Referer": f"https://llmchat.co/chat/{self.thread_id}",  # Specific referer for LLMChatCo
+             "Sec-Fetch-Dest": "empty",
+             "Sec-Fetch-Mode": "cors",
+             "Sec-Fetch-Site": "same-origin",
+             "Sec-CH-UA": fingerprint["sec_ch_ua"]
+             or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',  # Fallback if empty
+             "Sec-CH-UA-Mobile": "?0",
+             "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
+             "User-Agent": fingerprint["user_agent"],
+             "DNT": "1",  # Added back from previous version
+         }
+         self.session.headers.update(self.headers)
+
+         # Initialize the chat interface
+         self.chat = Chat(self)
+
+     @property
+     def models(self) -> SimpleModelList:
+         return SimpleModelList(type(self).AVAILABLE_MODELS)
+
+
+ if __name__ == "__main__":
+     # Example usage
+     client = LLMChatCo()
+     response = client.chat.completions.create(
+         model="gemini-flash-2.0",
+         messages=[{"role": "user", "content": "Hello, how are you?"}],
+         stream=False,
+     )
+     if isinstance(response, ChatCompletion):
+         if response.choices[0].message and response.choices[0].message.content:
+             print(response.choices[0].message.content)
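
The new file's __main__ block above demonstrates only the non-streaming path. As a complement, here is a minimal streaming sketch; it is not part of the published diff, and the import path webscout.Provider.OPENAI.llmchatco is inferred from entry 106 in the file list rather than shown inside the hunk:

    # Hypothetical usage sketch (not from the package itself).
    # Assumes the rewritten module is importable at this path.
    from webscout.Provider.OPENAI.llmchatco import LLMChatCo

    client = LLMChatCo(timeout=30)
    # With stream=True, create() returns a generator of ChatCompletionChunk
    # objects; each delta carries the next slice of the assistant's reply.
    for chunk in client.chat.completions.create(
        model="gemini-flash-2.0",
        messages=[{"role": "user", "content": "Hello!"}],
        stream=True,
    ):
        if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="", flush=True)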