webscout 8.2.2__py3-none-any.whl → 2026.1.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (483)
  1. webscout/AIauto.py +524 -143
  2. webscout/AIbase.py +247 -123
  3. webscout/AIutel.py +68 -132
  4. webscout/Bard.py +1072 -535
  5. webscout/Extra/GitToolkit/__init__.py +2 -2
  6. webscout/Extra/GitToolkit/gitapi/__init__.py +20 -12
  7. webscout/Extra/GitToolkit/gitapi/gist.py +142 -0
  8. webscout/Extra/GitToolkit/gitapi/organization.py +91 -0
  9. webscout/Extra/GitToolkit/gitapi/repository.py +308 -195
  10. webscout/Extra/GitToolkit/gitapi/search.py +162 -0
  11. webscout/Extra/GitToolkit/gitapi/trending.py +236 -0
  12. webscout/Extra/GitToolkit/gitapi/user.py +128 -96
  13. webscout/Extra/GitToolkit/gitapi/utils.py +82 -62
  14. webscout/Extra/YTToolkit/README.md +443 -0
  15. webscout/Extra/YTToolkit/YTdownloader.py +953 -957
  16. webscout/Extra/YTToolkit/__init__.py +3 -3
  17. webscout/Extra/YTToolkit/transcriber.py +595 -476
  18. webscout/Extra/YTToolkit/ytapi/README.md +230 -0
  19. webscout/Extra/YTToolkit/ytapi/__init__.py +22 -6
  20. webscout/Extra/YTToolkit/ytapi/captions.py +190 -0
  21. webscout/Extra/YTToolkit/ytapi/channel.py +302 -307
  22. webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
  23. webscout/Extra/YTToolkit/ytapi/extras.py +178 -45
  24. webscout/Extra/YTToolkit/ytapi/hashtag.py +120 -0
  25. webscout/Extra/YTToolkit/ytapi/https.py +89 -88
  26. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
  27. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -59
  28. webscout/Extra/YTToolkit/ytapi/pool.py +8 -8
  29. webscout/Extra/YTToolkit/ytapi/query.py +143 -40
  30. webscout/Extra/YTToolkit/ytapi/shorts.py +122 -0
  31. webscout/Extra/YTToolkit/ytapi/stream.py +68 -63
  32. webscout/Extra/YTToolkit/ytapi/suggestions.py +97 -0
  33. webscout/Extra/YTToolkit/ytapi/utils.py +66 -62
  34. webscout/Extra/YTToolkit/ytapi/video.py +189 -18
  35. webscout/Extra/__init__.py +2 -3
  36. webscout/Extra/gguf.py +1298 -682
  37. webscout/Extra/tempmail/README.md +488 -0
  38. webscout/Extra/tempmail/__init__.py +28 -28
  39. webscout/Extra/tempmail/async_utils.py +143 -141
  40. webscout/Extra/tempmail/base.py +172 -161
  41. webscout/Extra/tempmail/cli.py +191 -187
  42. webscout/Extra/tempmail/emailnator.py +88 -84
  43. webscout/Extra/tempmail/mail_tm.py +378 -361
  44. webscout/Extra/tempmail/temp_mail_io.py +304 -292
  45. webscout/Extra/weather.py +196 -194
  46. webscout/Extra/weather_ascii.py +17 -15
  47. webscout/Provider/AISEARCH/PERPLEXED_search.py +175 -0
  48. webscout/Provider/AISEARCH/Perplexity.py +237 -304
  49. webscout/Provider/AISEARCH/README.md +106 -0
  50. webscout/Provider/AISEARCH/__init__.py +16 -10
  51. webscout/Provider/AISEARCH/brave_search.py +298 -0
  52. webscout/Provider/AISEARCH/iask_search.py +130 -209
  53. webscout/Provider/AISEARCH/monica_search.py +200 -246
  54. webscout/Provider/AISEARCH/webpilotai_search.py +242 -281
  55. webscout/Provider/Algion.py +413 -0
  56. webscout/Provider/Andi.py +74 -69
  57. webscout/Provider/Apriel.py +313 -0
  58. webscout/Provider/Ayle.py +323 -0
  59. webscout/Provider/ChatSandbox.py +329 -0
  60. webscout/Provider/ClaudeOnline.py +365 -0
  61. webscout/Provider/Cohere.py +232 -208
  62. webscout/Provider/DeepAI.py +367 -0
  63. webscout/Provider/Deepinfra.py +343 -173
  64. webscout/Provider/EssentialAI.py +217 -0
  65. webscout/Provider/ExaAI.py +274 -261
  66. webscout/Provider/Gemini.py +60 -54
  67. webscout/Provider/GithubChat.py +385 -367
  68. webscout/Provider/Gradient.py +286 -0
  69. webscout/Provider/Groq.py +556 -670
  70. webscout/Provider/HadadXYZ.py +323 -0
  71. webscout/Provider/HeckAI.py +392 -233
  72. webscout/Provider/HuggingFace.py +387 -0
  73. webscout/Provider/IBM.py +340 -0
  74. webscout/Provider/Jadve.py +317 -266
  75. webscout/Provider/K2Think.py +306 -0
  76. webscout/Provider/Koboldai.py +221 -381
  77. webscout/Provider/Netwrck.py +273 -228
  78. webscout/Provider/Nvidia.py +310 -0
  79. webscout/Provider/OPENAI/DeepAI.py +489 -0
  80. webscout/Provider/OPENAI/K2Think.py +423 -0
  81. webscout/Provider/OPENAI/PI.py +463 -0
  82. webscout/Provider/OPENAI/README.md +890 -0
  83. webscout/Provider/OPENAI/TogetherAI.py +405 -0
  84. webscout/Provider/OPENAI/TwoAI.py +255 -0
  85. webscout/Provider/OPENAI/__init__.py +148 -25
  86. webscout/Provider/OPENAI/ai4chat.py +348 -0
  87. webscout/Provider/OPENAI/akashgpt.py +436 -0
  88. webscout/Provider/OPENAI/algion.py +303 -0
  89. webscout/Provider/OPENAI/ayle.py +365 -0
  90. webscout/Provider/OPENAI/base.py +253 -46
  91. webscout/Provider/OPENAI/cerebras.py +296 -0
  92. webscout/Provider/OPENAI/chatgpt.py +514 -193
  93. webscout/Provider/OPENAI/chatsandbox.py +233 -0
  94. webscout/Provider/OPENAI/deepinfra.py +403 -272
  95. webscout/Provider/OPENAI/e2b.py +2370 -1350
  96. webscout/Provider/OPENAI/elmo.py +278 -0
  97. webscout/Provider/OPENAI/exaai.py +186 -138
  98. webscout/Provider/OPENAI/freeassist.py +446 -0
  99. webscout/Provider/OPENAI/gradient.py +448 -0
  100. webscout/Provider/OPENAI/groq.py +380 -0
  101. webscout/Provider/OPENAI/hadadxyz.py +292 -0
  102. webscout/Provider/OPENAI/heckai.py +100 -104
  103. webscout/Provider/OPENAI/huggingface.py +321 -0
  104. webscout/Provider/OPENAI/ibm.py +425 -0
  105. webscout/Provider/OPENAI/llmchat.py +253 -0
  106. webscout/Provider/OPENAI/llmchatco.py +378 -327
  107. webscout/Provider/OPENAI/meta.py +541 -0
  108. webscout/Provider/OPENAI/netwrck.py +110 -84
  109. webscout/Provider/OPENAI/nvidia.py +317 -0
  110. webscout/Provider/OPENAI/oivscode.py +348 -0
  111. webscout/Provider/OPENAI/openrouter.py +328 -0
  112. webscout/Provider/OPENAI/pydantic_imports.py +1 -0
  113. webscout/Provider/OPENAI/sambanova.py +397 -0
  114. webscout/Provider/OPENAI/sonus.py +126 -115
  115. webscout/Provider/OPENAI/textpollinations.py +218 -133
  116. webscout/Provider/OPENAI/toolbaz.py +136 -166
  117. webscout/Provider/OPENAI/typefully.py +419 -0
  118. webscout/Provider/OPENAI/typliai.py +279 -0
  119. webscout/Provider/OPENAI/utils.py +314 -211
  120. webscout/Provider/OPENAI/wisecat.py +103 -125
  121. webscout/Provider/OPENAI/writecream.py +185 -156
  122. webscout/Provider/OPENAI/x0gpt.py +227 -136
  123. webscout/Provider/OPENAI/zenmux.py +380 -0
  124. webscout/Provider/OpenRouter.py +386 -0
  125. webscout/Provider/Openai.py +337 -496
  126. webscout/Provider/PI.py +443 -344
  127. webscout/Provider/QwenLM.py +346 -254
  128. webscout/Provider/STT/__init__.py +28 -0
  129. webscout/Provider/STT/base.py +303 -0
  130. webscout/Provider/STT/elevenlabs.py +264 -0
  131. webscout/Provider/Sambanova.py +317 -0
  132. webscout/Provider/TTI/README.md +69 -0
  133. webscout/Provider/TTI/__init__.py +37 -12
  134. webscout/Provider/TTI/base.py +147 -0
  135. webscout/Provider/TTI/claudeonline.py +393 -0
  136. webscout/Provider/TTI/magicstudio.py +292 -0
  137. webscout/Provider/TTI/miragic.py +180 -0
  138. webscout/Provider/TTI/pollinations.py +331 -0
  139. webscout/Provider/TTI/together.py +334 -0
  140. webscout/Provider/TTI/utils.py +14 -0
  141. webscout/Provider/TTS/README.md +186 -0
  142. webscout/Provider/TTS/__init__.py +43 -7
  143. webscout/Provider/TTS/base.py +523 -0
  144. webscout/Provider/TTS/deepgram.py +286 -156
  145. webscout/Provider/TTS/elevenlabs.py +189 -111
  146. webscout/Provider/TTS/freetts.py +218 -0
  147. webscout/Provider/TTS/murfai.py +288 -113
  148. webscout/Provider/TTS/openai_fm.py +364 -0
  149. webscout/Provider/TTS/parler.py +203 -111
  150. webscout/Provider/TTS/qwen.py +334 -0
  151. webscout/Provider/TTS/sherpa.py +286 -0
  152. webscout/Provider/TTS/speechma.py +693 -180
  153. webscout/Provider/TTS/streamElements.py +275 -333
  154. webscout/Provider/TTS/utils.py +280 -280
  155. webscout/Provider/TextPollinationsAI.py +221 -121
  156. webscout/Provider/TogetherAI.py +450 -0
  157. webscout/Provider/TwoAI.py +309 -199
  158. webscout/Provider/TypliAI.py +311 -0
  159. webscout/Provider/UNFINISHED/ChatHub.py +219 -0
  160. webscout/Provider/{OPENAI/glider.py → UNFINISHED/ChutesAI.py} +160 -145
  161. webscout/Provider/UNFINISHED/GizAI.py +300 -0
  162. webscout/Provider/UNFINISHED/Marcus.py +218 -0
  163. webscout/Provider/UNFINISHED/Qodo.py +481 -0
  164. webscout/Provider/UNFINISHED/XenAI.py +330 -0
  165. webscout/Provider/{Youchat.py → UNFINISHED/Youchat.py} +64 -47
  166. webscout/Provider/UNFINISHED/aihumanizer.py +41 -0
  167. webscout/Provider/UNFINISHED/grammerchecker.py +37 -0
  168. webscout/Provider/UNFINISHED/liner.py +342 -0
  169. webscout/Provider/UNFINISHED/liner_api_request.py +246 -0
  170. webscout/Provider/UNFINISHED/samurai.py +231 -0
  171. webscout/Provider/WiseCat.py +256 -196
  172. webscout/Provider/WrDoChat.py +390 -0
  173. webscout/Provider/__init__.py +115 -198
  174. webscout/Provider/ai4chat.py +181 -202
  175. webscout/Provider/akashgpt.py +330 -342
  176. webscout/Provider/cerebras.py +397 -242
  177. webscout/Provider/cleeai.py +236 -213
  178. webscout/Provider/elmo.py +291 -234
  179. webscout/Provider/geminiapi.py +343 -208
  180. webscout/Provider/julius.py +245 -223
  181. webscout/Provider/learnfastai.py +333 -266
  182. webscout/Provider/llama3mitril.py +230 -180
  183. webscout/Provider/llmchat.py +308 -213
  184. webscout/Provider/llmchatco.py +321 -311
  185. webscout/Provider/meta.py +996 -794
  186. webscout/Provider/oivscode.py +332 -0
  187. webscout/Provider/searchchat.py +316 -293
  188. webscout/Provider/sonus.py +264 -208
  189. webscout/Provider/toolbaz.py +359 -320
  190. webscout/Provider/turboseek.py +332 -219
  191. webscout/Provider/typefully.py +262 -280
  192. webscout/Provider/x0gpt.py +332 -256
  193. webscout/__init__.py +31 -38
  194. webscout/__main__.py +5 -5
  195. webscout/cli.py +585 -293
  196. webscout/client.py +1497 -0
  197. webscout/conversation.py +140 -565
  198. webscout/exceptions.py +383 -339
  199. webscout/litagent/__init__.py +29 -29
  200. webscout/litagent/agent.py +492 -455
  201. webscout/litagent/constants.py +60 -60
  202. webscout/models.py +505 -181
  203. webscout/optimizers.py +32 -378
  204. webscout/prompt_manager.py +376 -274
  205. webscout/sanitize.py +1514 -0
  206. webscout/scout/README.md +452 -0
  207. webscout/scout/__init__.py +8 -8
  208. webscout/scout/core/__init__.py +7 -7
  209. webscout/scout/core/crawler.py +330 -140
  210. webscout/scout/core/scout.py +800 -568
  211. webscout/scout/core/search_result.py +51 -96
  212. webscout/scout/core/text_analyzer.py +64 -63
  213. webscout/scout/core/text_utils.py +412 -277
  214. webscout/scout/core/web_analyzer.py +54 -52
  215. webscout/scout/element.py +872 -460
  216. webscout/scout/parsers/__init__.py +70 -69
  217. webscout/scout/parsers/html5lib_parser.py +182 -172
  218. webscout/scout/parsers/html_parser.py +238 -236
  219. webscout/scout/parsers/lxml_parser.py +203 -178
  220. webscout/scout/utils.py +38 -37
  221. webscout/search/__init__.py +47 -0
  222. webscout/search/base.py +201 -0
  223. webscout/search/bing_main.py +45 -0
  224. webscout/search/brave_main.py +92 -0
  225. webscout/search/duckduckgo_main.py +57 -0
  226. webscout/search/engines/__init__.py +127 -0
  227. webscout/search/engines/bing/__init__.py +15 -0
  228. webscout/search/engines/bing/base.py +35 -0
  229. webscout/search/engines/bing/images.py +114 -0
  230. webscout/search/engines/bing/news.py +96 -0
  231. webscout/search/engines/bing/suggestions.py +36 -0
  232. webscout/search/engines/bing/text.py +109 -0
  233. webscout/search/engines/brave/__init__.py +19 -0
  234. webscout/search/engines/brave/base.py +47 -0
  235. webscout/search/engines/brave/images.py +213 -0
  236. webscout/search/engines/brave/news.py +353 -0
  237. webscout/search/engines/brave/suggestions.py +318 -0
  238. webscout/search/engines/brave/text.py +167 -0
  239. webscout/search/engines/brave/videos.py +364 -0
  240. webscout/search/engines/duckduckgo/__init__.py +25 -0
  241. webscout/search/engines/duckduckgo/answers.py +80 -0
  242. webscout/search/engines/duckduckgo/base.py +189 -0
  243. webscout/search/engines/duckduckgo/images.py +100 -0
  244. webscout/search/engines/duckduckgo/maps.py +183 -0
  245. webscout/search/engines/duckduckgo/news.py +70 -0
  246. webscout/search/engines/duckduckgo/suggestions.py +22 -0
  247. webscout/search/engines/duckduckgo/text.py +221 -0
  248. webscout/search/engines/duckduckgo/translate.py +48 -0
  249. webscout/search/engines/duckduckgo/videos.py +80 -0
  250. webscout/search/engines/duckduckgo/weather.py +84 -0
  251. webscout/search/engines/mojeek.py +61 -0
  252. webscout/search/engines/wikipedia.py +77 -0
  253. webscout/search/engines/yahoo/__init__.py +41 -0
  254. webscout/search/engines/yahoo/answers.py +19 -0
  255. webscout/search/engines/yahoo/base.py +34 -0
  256. webscout/search/engines/yahoo/images.py +323 -0
  257. webscout/search/engines/yahoo/maps.py +19 -0
  258. webscout/search/engines/yahoo/news.py +258 -0
  259. webscout/search/engines/yahoo/suggestions.py +140 -0
  260. webscout/search/engines/yahoo/text.py +273 -0
  261. webscout/search/engines/yahoo/translate.py +19 -0
  262. webscout/search/engines/yahoo/videos.py +302 -0
  263. webscout/search/engines/yahoo/weather.py +220 -0
  264. webscout/search/engines/yandex.py +67 -0
  265. webscout/search/engines/yep/__init__.py +13 -0
  266. webscout/search/engines/yep/base.py +34 -0
  267. webscout/search/engines/yep/images.py +101 -0
  268. webscout/search/engines/yep/suggestions.py +38 -0
  269. webscout/search/engines/yep/text.py +99 -0
  270. webscout/search/http_client.py +172 -0
  271. webscout/search/results.py +141 -0
  272. webscout/search/yahoo_main.py +57 -0
  273. webscout/search/yep_main.py +48 -0
  274. webscout/server/__init__.py +48 -0
  275. webscout/server/config.py +78 -0
  276. webscout/server/exceptions.py +69 -0
  277. webscout/server/providers.py +286 -0
  278. webscout/server/request_models.py +131 -0
  279. webscout/server/request_processing.py +404 -0
  280. webscout/server/routes.py +642 -0
  281. webscout/server/server.py +351 -0
  282. webscout/server/ui_templates.py +1171 -0
  283. webscout/swiftcli/__init__.py +79 -809
  284. webscout/swiftcli/core/__init__.py +7 -0
  285. webscout/swiftcli/core/cli.py +574 -0
  286. webscout/swiftcli/core/context.py +98 -0
  287. webscout/swiftcli/core/group.py +268 -0
  288. webscout/swiftcli/decorators/__init__.py +28 -0
  289. webscout/swiftcli/decorators/command.py +243 -0
  290. webscout/swiftcli/decorators/options.py +247 -0
  291. webscout/swiftcli/decorators/output.py +392 -0
  292. webscout/swiftcli/exceptions.py +21 -0
  293. webscout/swiftcli/plugins/__init__.py +9 -0
  294. webscout/swiftcli/plugins/base.py +134 -0
  295. webscout/swiftcli/plugins/manager.py +269 -0
  296. webscout/swiftcli/utils/__init__.py +58 -0
  297. webscout/swiftcli/utils/formatting.py +251 -0
  298. webscout/swiftcli/utils/parsing.py +368 -0
  299. webscout/update_checker.py +280 -136
  300. webscout/utils.py +28 -14
  301. webscout/version.py +2 -1
  302. webscout/version.py.bak +3 -0
  303. webscout/zeroart/__init__.py +218 -55
  304. webscout/zeroart/base.py +70 -60
  305. webscout/zeroart/effects.py +155 -99
  306. webscout/zeroart/fonts.py +1799 -816
  307. webscout-2026.1.19.dist-info/METADATA +638 -0
  308. webscout-2026.1.19.dist-info/RECORD +312 -0
  309. {webscout-8.2.2.dist-info → webscout-2026.1.19.dist-info}/WHEEL +1 -1
  310. webscout-2026.1.19.dist-info/entry_points.txt +4 -0
  311. webscout-2026.1.19.dist-info/top_level.txt +1 -0
  312. inferno/__init__.py +0 -6
  313. inferno/__main__.py +0 -9
  314. inferno/cli.py +0 -6
  315. webscout/DWEBS.py +0 -477
  316. webscout/Extra/autocoder/__init__.py +0 -9
  317. webscout/Extra/autocoder/autocoder.py +0 -849
  318. webscout/Extra/autocoder/autocoder_utiles.py +0 -332
  319. webscout/LLM.py +0 -442
  320. webscout/Litlogger/__init__.py +0 -67
  321. webscout/Litlogger/core/__init__.py +0 -6
  322. webscout/Litlogger/core/level.py +0 -23
  323. webscout/Litlogger/core/logger.py +0 -165
  324. webscout/Litlogger/handlers/__init__.py +0 -12
  325. webscout/Litlogger/handlers/console.py +0 -33
  326. webscout/Litlogger/handlers/file.py +0 -143
  327. webscout/Litlogger/handlers/network.py +0 -173
  328. webscout/Litlogger/styles/__init__.py +0 -7
  329. webscout/Litlogger/styles/colors.py +0 -249
  330. webscout/Litlogger/styles/formats.py +0 -458
  331. webscout/Litlogger/styles/text.py +0 -87
  332. webscout/Litlogger/utils/__init__.py +0 -6
  333. webscout/Litlogger/utils/detectors.py +0 -153
  334. webscout/Litlogger/utils/formatters.py +0 -200
  335. webscout/Local/__init__.py +0 -12
  336. webscout/Local/__main__.py +0 -9
  337. webscout/Local/api.py +0 -576
  338. webscout/Local/cli.py +0 -516
  339. webscout/Local/config.py +0 -75
  340. webscout/Local/llm.py +0 -287
  341. webscout/Local/model_manager.py +0 -253
  342. webscout/Local/server.py +0 -721
  343. webscout/Local/utils.py +0 -93
  344. webscout/Provider/AI21.py +0 -177
  345. webscout/Provider/AISEARCH/DeepFind.py +0 -250
  346. webscout/Provider/AISEARCH/ISou.py +0 -256
  347. webscout/Provider/AISEARCH/felo_search.py +0 -228
  348. webscout/Provider/AISEARCH/genspark_search.py +0 -208
  349. webscout/Provider/AISEARCH/hika_search.py +0 -194
  350. webscout/Provider/AISEARCH/scira_search.py +0 -324
  351. webscout/Provider/Aitopia.py +0 -292
  352. webscout/Provider/AllenAI.py +0 -413
  353. webscout/Provider/Blackboxai.py +0 -229
  354. webscout/Provider/C4ai.py +0 -432
  355. webscout/Provider/ChatGPTClone.py +0 -226
  356. webscout/Provider/ChatGPTES.py +0 -237
  357. webscout/Provider/ChatGPTGratis.py +0 -194
  358. webscout/Provider/Chatify.py +0 -175
  359. webscout/Provider/Cloudflare.py +0 -273
  360. webscout/Provider/DeepSeek.py +0 -196
  361. webscout/Provider/ElectronHub.py +0 -709
  362. webscout/Provider/ExaChat.py +0 -342
  363. webscout/Provider/Free2GPT.py +0 -241
  364. webscout/Provider/GPTWeb.py +0 -193
  365. webscout/Provider/Glider.py +0 -211
  366. webscout/Provider/HF_space/__init__.py +0 -0
  367. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  368. webscout/Provider/HuggingFaceChat.py +0 -462
  369. webscout/Provider/Hunyuan.py +0 -272
  370. webscout/Provider/LambdaChat.py +0 -392
  371. webscout/Provider/Llama.py +0 -200
  372. webscout/Provider/Llama3.py +0 -204
  373. webscout/Provider/Marcus.py +0 -148
  374. webscout/Provider/OLLAMA.py +0 -396
  375. webscout/Provider/OPENAI/c4ai.py +0 -367
  376. webscout/Provider/OPENAI/chatgptclone.py +0 -460
  377. webscout/Provider/OPENAI/exachat.py +0 -433
  378. webscout/Provider/OPENAI/freeaichat.py +0 -352
  379. webscout/Provider/OPENAI/opkfc.py +0 -488
  380. webscout/Provider/OPENAI/scirachat.py +0 -463
  381. webscout/Provider/OPENAI/standardinput.py +0 -425
  382. webscout/Provider/OPENAI/typegpt.py +0 -346
  383. webscout/Provider/OPENAI/uncovrAI.py +0 -455
  384. webscout/Provider/OPENAI/venice.py +0 -413
  385. webscout/Provider/OPENAI/yep.py +0 -327
  386. webscout/Provider/OpenGPT.py +0 -199
  387. webscout/Provider/Perplexitylabs.py +0 -415
  388. webscout/Provider/Phind.py +0 -535
  389. webscout/Provider/PizzaGPT.py +0 -198
  390. webscout/Provider/Reka.py +0 -214
  391. webscout/Provider/StandardInput.py +0 -278
  392. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  393. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  394. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  395. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  396. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  397. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  398. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  399. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  400. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  401. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  402. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  403. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  404. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  405. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  406. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  407. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  408. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  409. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  410. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  411. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  412. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  413. webscout/Provider/TTI/artbit/__init__.py +0 -22
  414. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  415. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  416. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  417. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  418. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  419. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  420. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  421. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  422. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  423. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  424. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  425. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  426. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  427. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  428. webscout/Provider/TTI/talkai/__init__.py +0 -4
  429. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  430. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  431. webscout/Provider/TTS/gesserit.py +0 -127
  432. webscout/Provider/TeachAnything.py +0 -187
  433. webscout/Provider/Venice.py +0 -219
  434. webscout/Provider/VercelAI.py +0 -234
  435. webscout/Provider/WebSim.py +0 -228
  436. webscout/Provider/Writecream.py +0 -211
  437. webscout/Provider/WritingMate.py +0 -197
  438. webscout/Provider/aimathgpt.py +0 -189
  439. webscout/Provider/askmyai.py +0 -158
  440. webscout/Provider/asksteve.py +0 -203
  441. webscout/Provider/bagoodex.py +0 -145
  442. webscout/Provider/chatglm.py +0 -205
  443. webscout/Provider/copilot.py +0 -428
  444. webscout/Provider/freeaichat.py +0 -271
  445. webscout/Provider/gaurish.py +0 -244
  446. webscout/Provider/geminiprorealtime.py +0 -160
  447. webscout/Provider/granite.py +0 -187
  448. webscout/Provider/hermes.py +0 -219
  449. webscout/Provider/koala.py +0 -268
  450. webscout/Provider/labyrinth.py +0 -340
  451. webscout/Provider/lepton.py +0 -194
  452. webscout/Provider/llamatutor.py +0 -192
  453. webscout/Provider/multichat.py +0 -325
  454. webscout/Provider/promptrefine.py +0 -193
  455. webscout/Provider/scira_chat.py +0 -277
  456. webscout/Provider/scnet.py +0 -187
  457. webscout/Provider/talkai.py +0 -194
  458. webscout/Provider/tutorai.py +0 -252
  459. webscout/Provider/typegpt.py +0 -232
  460. webscout/Provider/uncovr.py +0 -312
  461. webscout/Provider/yep.py +0 -376
  462. webscout/litprinter/__init__.py +0 -59
  463. webscout/scout/core.py +0 -881
  464. webscout/tempid.py +0 -128
  465. webscout/webscout_search.py +0 -1346
  466. webscout/webscout_search_async.py +0 -877
  467. webscout/yep_search.py +0 -297
  468. webscout-8.2.2.dist-info/METADATA +0 -734
  469. webscout-8.2.2.dist-info/RECORD +0 -309
  470. webscout-8.2.2.dist-info/entry_points.txt +0 -5
  471. webscout-8.2.2.dist-info/top_level.txt +0 -3
  472. webstoken/__init__.py +0 -30
  473. webstoken/classifier.py +0 -189
  474. webstoken/keywords.py +0 -216
  475. webstoken/language.py +0 -128
  476. webstoken/ner.py +0 -164
  477. webstoken/normalizer.py +0 -35
  478. webstoken/processor.py +0 -77
  479. webstoken/sentiment.py +0 -206
  480. webstoken/stemmer.py +0 -73
  481. webstoken/tagger.py +0 -60
  482. webstoken/tokenizer.py +0 -158
  483. {webscout-8.2.2.dist-info → webscout-2026.1.19.dist-info/licenses}/LICENSE.md +0 -0
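The largest single addition is the new unified client interface in webscout/client.py, whose diff follows. As an editorial illustration only (not part of the diff), and going solely by the docstrings visible in that file, a minimal usage sketch might look like this:

    from webscout.client import Client

    client = Client(print_provider_info=True)

    # "auto" lets the client pick any available provider/model pair;
    # "Provider/model" strings and bare model names are also accepted.
    response = client.chat.completions.create(
        model="auto",
        messages=[{"role": "user", "content": "Hello!"}],
    )
    print(response.choices[0].message.content)

    # Streaming yields ChatCompletionChunk objects.
    for chunk in client.chat.completions.create(
        model="auto",
        messages=[{"role": "user", "content": "Hello!"}],
        stream=True,
    ):
        print(chunk.choices[0].delta.content, end="")

Provider failover, fuzzy model matching, and provider-instance caching all happen inside this file, as the diff below shows.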
webscout/client.py ADDED
@@ -0,0 +1,1497 @@
1
+ """
2
+ Webscout Unified Client Interface
3
+
4
+ A unified client for webscout that provides a simple interface
5
+ to interact with multiple AI providers for chat completions and image generation.
6
+
7
+ Features:
8
+ - Automatic provider failover
9
+ - Support for specifying exact provider
10
+ - Intelligent model resolution (auto, provider/model, or model name)
11
+ - Caching of provider instances
12
+ - Full streaming support
13
+ """
14
+
15
+ import difflib
16
+ import importlib
17
+ import inspect
18
+ import pkgutil
19
+ import random
20
+ from typing import Any, Dict, Generator, List, Optional, Set, Tuple, Type, Union, cast
21
+
22
+ from webscout.Provider.OPENAI.base import (
23
+ BaseChat,
24
+ BaseCompletions,
25
+ OpenAICompatibleProvider,
26
+ Tool,
27
+ )
28
+ from webscout.Provider.OPENAI.utils import (
29
+ ChatCompletion,
30
+ ChatCompletionChunk,
31
+ )
32
+ from webscout.Provider.TTI.base import BaseImages, TTICompatibleProvider
33
+ from webscout.Provider.TTI.utils import ImageResponse
34
+
35
+
36
+ def load_openai_providers() -> Tuple[Dict[str, Type[OpenAICompatibleProvider]], Set[str]]:
37
+ """
38
+ Dynamically loads all OpenAI-compatible provider classes from the OPENAI module.
39
+
40
+ Scans the webscout.Provider.OPENAI package and imports all subclasses of
41
+ OpenAICompatibleProvider. Excludes base classes, utility modules, and private classes.
42
+
43
+ Returns:
44
+ A tuple containing:
45
+ - A dictionary mapping provider class names to their class objects.
46
+ - A set of provider names that require API authentication.
47
+
48
+ Raises:
49
+ No exceptions are raised; failures are silently handled to ensure robust loading.
50
+
51
+ Examples:
52
+ >>> providers, auth_required = load_openai_providers()
53
+ >>> print(list(providers.keys())[:3])
54
+ ['Claude', 'GPT4Free', 'OpenRouter']
55
+ >>> print('Claude' in auth_required)
56
+ True
57
+ """
58
+ provider_map = {}
59
+ auth_required_providers = set()
60
+
61
+ try:
62
+ provider_package = importlib.import_module("webscout.Provider.OPENAI")
63
+ for _, module_name, _ in pkgutil.iter_modules(provider_package.__path__):
64
+ if module_name.startswith(("base", "utils", "pydantic", "__")):
65
+ continue
66
+ try:
67
+ module = importlib.import_module(f"webscout.Provider.OPENAI.{module_name}")
68
+ for attr_name in dir(module):
69
+ attr = getattr(module, attr_name)
70
+ if (
71
+ isinstance(attr, type)
72
+ and issubclass(attr, OpenAICompatibleProvider)
73
+ and attr != OpenAICompatibleProvider
74
+ and not attr_name.startswith(("Base", "_"))
75
+ ):
76
+ provider_map[attr_name] = attr
77
+ if hasattr(attr, "required_auth") and attr.required_auth:
78
+ auth_required_providers.add(attr_name)
79
+ except Exception:
80
+ pass
81
+ except Exception:
82
+ pass
83
+ return provider_map, auth_required_providers
84
+
85
+
86
+ def load_tti_providers() -> Tuple[Dict[str, Type[TTICompatibleProvider]], Set[str]]:
87
+ """
88
+ Dynamically loads all TTI (Text-to-Image) provider classes from the TTI module.
89
+
90
+ Scans the webscout.Provider.TTI package and imports all subclasses of
91
+ TTICompatibleProvider. Excludes base classes, utility modules, and private classes.
92
+
93
+ Returns:
94
+ A tuple containing:
95
+ - A dictionary mapping TTI provider class names to their class objects.
96
+ - A set of TTI provider names that require API authentication.
97
+
98
+ Raises:
99
+ No exceptions are raised; failures are silently handled to ensure robust loading.
100
+
101
+ Examples:
102
+ >>> providers, auth_required = load_tti_providers()
103
+ >>> print('DALL-E' in providers)
104
+ True
105
+ >>> print('Stable Diffusion' in auth_required)
106
+ False
107
+ """
108
+ provider_map = {}
109
+ auth_required_providers = set()
110
+
111
+ try:
112
+ provider_package = importlib.import_module("webscout.Provider.TTI")
113
+ for _, module_name, _ in pkgutil.iter_modules(provider_package.__path__):
114
+ if module_name.startswith(("base", "utils", "__")):
115
+ continue
116
+ try:
117
+ module = importlib.import_module(f"webscout.Provider.TTI.{module_name}")
118
+ for attr_name in dir(module):
119
+ attr = getattr(module, attr_name)
120
+ if (
121
+ isinstance(attr, type)
122
+ and issubclass(attr, TTICompatibleProvider)
123
+ and attr != TTICompatibleProvider
124
+ and not attr_name.startswith(("Base", "_"))
125
+ ):
126
+ provider_map[attr_name] = attr
127
+ if hasattr(attr, "required_auth") and attr.required_auth:
128
+ auth_required_providers.add(attr_name)
129
+ except Exception:
130
+ pass
131
+ except Exception:
132
+ pass
133
+ return provider_map, auth_required_providers
134
+
135
+
136
+ OPENAI_PROVIDERS, OPENAI_AUTH_REQUIRED = load_openai_providers()
137
+ TTI_PROVIDERS, TTI_AUTH_REQUIRED = load_tti_providers()
138
+
139
+
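# --- Editor's note, not part of the diff: the two loader calls above populate
# module-level registries of provider classes. Assuming the package imports
# cleanly in your environment, a quick sanity check looks like:
from webscout.client import OPENAI_PROVIDERS, OPENAI_AUTH_REQUIRED, TTI_PROVIDERS

print(len(OPENAI_PROVIDERS), "chat providers;", len(TTI_PROVIDERS), "image providers")
print("auth-required chat providers:", sorted(OPENAI_AUTH_REQUIRED))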
140
+ def _get_models_safely(provider_cls: type, client: Optional["Client"] = None) -> List[str]:
141
+ """
142
+ Safely retrieves the list of available models from a provider.
143
+
144
+ Attempts to instantiate the provider class and call its models.list() method.
145
+ If a Client instance is provided, uses the client's provider cache to avoid
146
+ redundant instantiations. Handles all exceptions gracefully and returns an
147
+ empty list if model retrieval fails.
148
+
149
+ Args:
150
+ provider_cls: The provider class to retrieve models from.
151
+ client: Optional Client instance to use for caching and configuration.
152
+ If provided, uses client's proxies and api_key for initialization.
153
+
154
+ Returns:
155
+ A list of available model identifiers (strings). Returns an empty list
156
+ if the provider has no models or if instantiation fails.
157
+
158
+ Note:
159
+ This function silently handles all exceptions and will not raise errors.
160
+ Model names are extracted from both string lists and dicts with 'id' keys.
161
+
162
+ Examples:
163
+ >>> from webscout.client import _get_models_safely, Client
164
+ >>> client = Client()
165
+ >>> from webscout.Provider.OPENAI.some_provider import SomeProvider
166
+ >>> models = _get_models_safely(SomeProvider, client)
167
+ >>> print(models)
168
+ ['gpt-4', 'gpt-3.5-turbo']
169
+ """
170
+ models = []
171
+
172
+ try:
173
+ instance = None
174
+ if client:
175
+ p_name = provider_cls.__name__
176
+ if p_name in client._provider_cache:
177
+ instance = client._provider_cache[p_name]
178
+ else:
179
+ try:
180
+ init_kwargs = {}
181
+ if client.proxies:
182
+ init_kwargs["proxies"] = client.proxies
183
+ if client.api_key:
184
+ init_kwargs["api_key"] = client.api_key
185
+ instance = provider_cls(**init_kwargs)
186
+ except Exception:
187
+ try:
188
+ instance = provider_cls()
189
+ except Exception:
190
+ pass
191
+
192
+ if instance:
193
+ client._provider_cache[p_name] = instance
194
+ else:
195
+ try:
196
+ instance = provider_cls()
197
+ except Exception:
198
+ pass
199
+
200
+ if instance and hasattr(instance, "models") and hasattr(instance.models, "list"):
201
+ res = instance.models.list()
202
+ if isinstance(res, list):
203
+ for m in res:
204
+ if isinstance(m, str):
205
+ models.append(m)
206
+ elif isinstance(m, dict) and "id" in m:
207
+ models.append(m["id"])
208
+ except Exception:
209
+ pass
210
+
211
+ return models
212
+
213
+
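# --- Editor's note, not part of the diff: _get_models_safely() accepts both plain
# strings and dicts carrying an "id" key from models.list(); the normalization in
# the loop above is roughly equivalent to (model ids below are hypothetical):
raw = ["gpt-4o-mini", {"id": "llama-3.1-70b"}]
ids = [m if isinstance(m, str) else m["id"] for m in raw]
# ids == ["gpt-4o-mini", "llama-3.1-70b"]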
214
+ class ClientCompletions(BaseCompletions):
215
+ """
216
+ Unified completions interface with intelligent provider and model resolution.
217
+
218
+ This class manages chat completions by automatically selecting appropriate
219
+ providers and models based on user input. It supports:
220
+ - Automatic model discovery and fuzzy matching
221
+ - Provider failover for reliability
222
+ - Provider and model caching for performance
223
+ - Streaming and non-streaming responses
224
+ - Tools and function calling support
225
+
226
+ Attributes:
227
+ _client: Reference to the parent Client instance.
228
+ _last_provider: Name of the last successfully used provider.
229
+
230
+ Examples:
231
+ >>> from webscout.client import Client
232
+ >>> client = Client(print_provider_info=True)
233
+ >>> response = client.chat.completions.create(
234
+ ... model="auto",
235
+ ... messages=[{"role": "user", "content": "Hello!"}]
236
+ ... )
237
+ """
238
+
239
+ def __init__(self, client: "Client"):
240
+ self._client = client
241
+ self._last_provider: Optional[str] = None
242
+
243
+ @property
244
+ def last_provider(self) -> Optional[str]:
245
+ """
246
+ Returns the name of the last successfully used provider.
247
+
248
+ This property tracks which provider was most recently used to generate
249
+ a completion. Useful for debugging and understanding which fallback
250
+ providers are being utilized.
251
+
252
+ Returns:
253
+ The name of the last provider as a string, or None if no provider
254
+ has been successfully used yet.
255
+
256
+ Examples:
257
+ >>> completions = client.chat.completions
258
+ >>> response = completions.create(model="auto", messages=[...])
259
+ >>> print(completions.last_provider)
260
+ 'GPT4Free'
261
+ """
262
+ return self._last_provider
263
+
264
+ def _get_provider_instance(
265
+ self, provider_class: Type[OpenAICompatibleProvider], **kwargs
266
+ ) -> OpenAICompatibleProvider:
267
+ """
268
+ Retrieves or creates a cached provider instance.
269
+
270
+ Checks if a provider instance already exists in the client's cache.
271
+ If not, initializes a new instance with client-level configuration
272
+ (proxies, api_key) merged with any additional kwargs.
273
+
274
+ Args:
275
+ provider_class: The OpenAI-compatible provider class to instantiate.
276
+ **kwargs: Additional keyword arguments to pass to the provider's constructor.
277
+
278
+ Returns:
279
+ An instantiated and initialized provider instance.
280
+
281
+ Raises:
282
+ RuntimeError: If the provider cannot be initialized with or without
283
+ client configuration.
284
+
285
+ Examples:
286
+ >>> from webscout.Provider.OPENAI.gpt4free import GPT4Free
287
+ >>> completions = client.chat.completions
288
+ >>> instance = completions._get_provider_instance(GPT4Free)
289
+ """
290
+ p_name = provider_class.__name__
291
+ if p_name in self._client._provider_cache:
292
+ return self._client._provider_cache[p_name]
293
+
294
+ init_kwargs = {}
295
+ if self._client.proxies:
296
+ init_kwargs["proxies"] = self._client.proxies
297
+ if self._client.api_key:
298
+ init_kwargs["api_key"] = self._client.api_key
299
+ init_kwargs.update(kwargs)
300
+
301
+ try:
302
+ instance = provider_class(**init_kwargs)
303
+ self._client._provider_cache[p_name] = instance
304
+ return instance
305
+ except Exception:
306
+ try:
307
+ instance = provider_class()
308
+ self._client._provider_cache[p_name] = instance
309
+ return instance
310
+ except Exception as e:
311
+ raise RuntimeError(f"Failed to initialize provider {provider_class.__name__}: {e}")
312
+
313
+ def _fuzzy_resolve_provider_and_model(
314
+ self, model: str
315
+ ) -> Optional[Tuple[Type[OpenAICompatibleProvider], str]]:
316
+ """
317
+ Performs fuzzy matching to find the closest model match across all providers.
318
+
319
+ Attempts three levels of matching:
320
+ 1. Exact case-insensitive match
321
+ 2. Substring match (model contains query or vice versa)
322
+ 3. Fuzzy match using difflib with 50% cutoff
323
+
324
+ Args:
325
+ model: The model name or partial name to search for.
326
+
327
+ Returns:
328
+ A tuple of (provider_class, resolved_model_name) if a match is found,
329
+ or None if no suitable match is found.
330
+
331
+ Note:
332
+ Prints informational messages if client.print_provider_info is enabled.
333
+
334
+ Examples:
335
+ >>> result = completions._fuzzy_resolve_provider_and_model("gpt-4")
336
+ >>> if result:
337
+ ... provider_cls, model_name = result
338
+ ... print(f"Found: {model_name} via {provider_cls.__name__}")
339
+ """
340
+ available = self._get_available_providers()
341
+ model_to_provider = {}
342
+
343
+ for p_name, p_cls in available:
344
+ p_models = _get_models_safely(p_cls, self._client)
345
+ for m in p_models:
346
+ if m not in model_to_provider:
347
+ model_to_provider[m] = p_cls
348
+
349
+ if not model_to_provider:
350
+ return None
351
+
352
+ # 1. Exact case-insensitive match
353
+ for m_name in model_to_provider:
354
+ if m_name.lower() == model.lower():
355
+ return model_to_provider[m_name], m_name
356
+
357
+ # 2. Substring match
358
+ for m_name in model_to_provider:
359
+ if model.lower() in m_name.lower() or m_name.lower() in model.lower():
360
+ if self._client.print_provider_info:
361
+ print(f"\033[1;33mSubstring match: '{model}' -> '{m_name}'\033[0m")
362
+ return model_to_provider[m_name], m_name
363
+
364
+ # 3. Fuzzy match with difflib
365
+ matches = difflib.get_close_matches(model, model_to_provider.keys(), n=1, cutoff=0.5)
366
+ if matches:
367
+ matched_model = matches[0]
368
+ if self._client.print_provider_info:
369
+ print(f"\033[1;33mFuzzy match: '{model}' -> '{matched_model}'\033[0m")
370
+ return model_to_provider[matched_model], matched_model
371
+ return None
372
+
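# --- Editor's note, not part of the diff: the third matching tier above relies on
# difflib.get_close_matches with a 0.5 cutoff, e.g. (hypothetical model ids):
import difflib

candidates = ["gpt-4o-mini", "llama-3.1-70b", "qwen2.5-coder"]
print(difflib.get_close_matches("gpt4o", candidates, n=1, cutoff=0.5))
# Expected to print ['gpt-4o-mini'] for this particular candidate list.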
373
+ def _resolve_provider_and_model(
374
+ self, model: str, provider: Optional[Type[OpenAICompatibleProvider]]
375
+ ) -> Tuple[Type[OpenAICompatibleProvider], str]:
376
+ """
377
+ Resolves the best provider and model name based on input specifications.
378
+
379
+ Handles multiple input formats:
380
+ - "provider/model" format: Parses and resolves to exact provider
381
+ - "auto": Randomly selects an available provider and model
382
+ - Named model: Searches across all providers for exact or fuzzy match
383
+
384
+ Resolution strategy:
385
+ 1. If "provider/model" format, find provider by name
386
+ 2. If provider specified, use it with given or auto-selected model
387
+ 3. If "auto", randomly select from available providers and models
388
+ 4. Otherwise, search across providers for exact match
389
+ 5. Fall back to fuzzy matching
390
+ 6. Finally, randomly select from available providers
391
+
392
+ Args:
393
+ model: Model identifier. Can be "auto", "provider/model", or model name.
394
+ provider: Optional provider class to constrain resolution.
395
+
396
+ Returns:
397
+ A tuple of (provider_class, resolved_model_name).
398
+
399
+ Raises:
400
+ RuntimeError: If no providers are available or model cannot be resolved.
401
+
402
+ Examples:
403
+ >>> # Auto resolution
404
+ >>> p_cls, m_name = completions._resolve_provider_and_model("auto", None)
405
+ >>> # Explicit provider/model
406
+ >>> p_cls, m_name = completions._resolve_provider_and_model(
407
+ ... "GPT4Free/gpt-3.5-turbo", None
408
+ ... )
409
+ >>> # Model name fuzzy matching
410
+ >>> p_cls, m_name = completions._resolve_provider_and_model("gpt-4", None)
411
+ """
412
+ if "/" in model:
413
+ p_name, m_name = model.split("/", 1)
414
+ found_p = next(
415
+ (cls for name, cls in OPENAI_PROVIDERS.items() if name.lower() == p_name.lower()),
416
+ None,
417
+ )
418
+ if found_p:
419
+ return found_p, m_name
420
+
421
+ if provider:
422
+ resolved_model = model
423
+ if model == "auto":
424
+ p_models = _get_models_safely(provider, self._client)
425
+ if p_models:
426
+ resolved_model = random.choice(p_models)
427
+ else:
428
+ raise RuntimeError(f"Provider {provider.__name__} has no available models.")
429
+ return provider, resolved_model
430
+
431
+ if model == "auto":
432
+ available = self._get_available_providers()
433
+ if not available:
434
+ raise RuntimeError("No available chat providers found.")
435
+
436
+ providers_with_models = []
437
+ for name, cls in available:
438
+ p_models = _get_models_safely(cls, self._client)
439
+ if p_models:
440
+ providers_with_models.append((cls, p_models))
441
+
442
+ if providers_with_models:
443
+ p_cls, p_models = random.choice(providers_with_models)
444
+ m_name = random.choice(p_models)
445
+ return p_cls, m_name
446
+ else:
447
+ raise RuntimeError("No available chat providers with models found.")
448
+
449
+ available = self._get_available_providers()
450
+ for p_name, p_cls in available:
451
+ p_models = _get_models_safely(p_cls, self._client)
452
+ if p_models and model in p_models:
453
+ return p_cls, model
454
+
455
+ fuzzy_result = self._fuzzy_resolve_provider_and_model(model)
456
+ if fuzzy_result:
457
+ return fuzzy_result
458
+
459
+ if available:
460
+ random.shuffle(available)
461
+ return available[0][1], model
462
+
463
+ raise RuntimeError(f"No providers found for model '{model}'")
464
+
465
+ def _get_available_providers(self) -> List[Tuple[str, Type[OpenAICompatibleProvider]]]:
466
+ """
467
+ Returns a list of available chat providers for the current client configuration.
468
+
469
+ Filters the global provider registry based on:
470
+ - Client's exclude list
471
+ - API key availability (if api_key is set, includes auth-required providers)
472
+
473
+ Returns:
474
+ A list of tuples containing (provider_name, provider_class) pairs
475
+ for all available providers.
476
+
477
+ Examples:
478
+ >>> providers = completions._get_available_providers()
479
+ >>> print([p[0] for p in providers])
480
+ ['GPT4Free', 'OpenRouter', 'Groq']
481
+ """
482
+ exclude = set(self._client.exclude or [])
483
+ if self._client.api_key:
484
+ return [(name, cls) for name, cls in OPENAI_PROVIDERS.items() if name not in exclude]
485
+ return [
486
+ (name, cls)
487
+ for name, cls in OPENAI_PROVIDERS.items()
488
+ if name not in OPENAI_AUTH_REQUIRED and name not in exclude
489
+ ]
490
+
491
+ def create(
492
+ self,
493
+ *,
494
+ model: str = "auto",
495
+ messages: List[Dict[str, Any]],
496
+ max_tokens: Optional[int] = None,
497
+ stream: bool = False,
498
+ temperature: Optional[float] = None,
499
+ top_p: Optional[float] = None,
500
+ tools: Optional[List[Union[Tool, Dict[str, Any]]]] = None,
501
+ tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
502
+ timeout: Optional[int] = None,
503
+ proxies: Optional[dict] = None,
504
+ provider: Optional[Type[OpenAICompatibleProvider]] = None,
505
+ **kwargs: Any,
506
+ ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
507
+ """
508
+ Creates a chat completion with automatic provider selection and failover.
509
+
510
+ Attempts to resolve the specified model to a provider and model name,
511
+ then creates a completion. If the initial attempt fails, automatically
512
+ falls back to other available providers, prioritizing:
513
+ 1. Providers with exact model matches
514
+ 2. Providers with fuzzy model matches
515
+ 3. Providers with any available model
516
+
517
+ Args:
518
+ model: Model identifier. Default "auto" randomly selects available models.
519
+ Can be "provider/model" format or a bare model name.
520
+ messages: List of message dicts with 'role' and 'content' keys. Required.
521
+ max_tokens: Maximum tokens in the response. Optional.
522
+ stream: Whether to stream the response. Default is False.
523
+ temperature: Sampling temperature (0-2). Controls response randomness. Optional.
524
+ top_p: Nucleus sampling parameter (0-1). Optional.
525
+ tools: List of tools or tool definitions for function calling. Optional.
526
+ tool_choice: Which tool to use or how to select tools. Optional.
527
+ timeout: Request timeout in seconds. Optional.
528
+ proxies: HTTP proxy configuration dict. Optional.
529
+ provider: Specific provider class to use. Optional.
530
+ **kwargs: Additional arguments passed to the provider.
531
+
532
+ Returns:
533
+ ChatCompletion object for non-streaming requests.
534
+ Generator[ChatCompletionChunk, None, None] for streaming requests.
535
+
536
+ Raises:
537
+ RuntimeError: If all chat providers fail or no providers are available.
538
+
539
+ Note:
540
+ If print_provider_info is True, provider name and model are printed
541
+ to stdout in color-formatted text. Streaming responses print on first chunk.
542
+
543
+ Examples:
544
+ >>> client = Client(print_provider_info=True)
545
+ >>> response = client.chat.completions.create(
546
+ ... model="gpt-4",
547
+ ... messages=[{"role": "user", "content": "Hello!"}]
548
+ ... )
549
+ >>> print(response.choices[0].message.content)
550
+
551
+ >>> # Streaming example
552
+ >>> for chunk in client.chat.completions.create(
553
+ ... model="auto",
554
+ ... messages=[{"role": "user", "content": "Hello!"}],
555
+ ... stream=True
556
+ ... ):
557
+ ... print(chunk.choices[0].delta.content, end="")
558
+ """
559
+ try:
560
+ resolved_provider, resolved_model = self._resolve_provider_and_model(model, provider)
561
+ except Exception:
562
+ resolved_provider, resolved_model = None, model
563
+
564
+ call_kwargs = {
565
+ "model": resolved_model,
566
+ "messages": messages,
567
+ "stream": stream,
568
+ }
569
+ if max_tokens is not None:
570
+ call_kwargs["max_tokens"] = max_tokens
571
+ if temperature is not None:
572
+ call_kwargs["temperature"] = temperature
573
+ if top_p is not None:
574
+ call_kwargs["top_p"] = top_p
575
+ if tools is not None:
576
+ call_kwargs["tools"] = tools
577
+ if tool_choice is not None:
578
+ call_kwargs["tool_choice"] = tool_choice
579
+ if timeout is not None:
580
+ call_kwargs["timeout"] = timeout
581
+ if proxies is not None:
582
+ call_kwargs["proxies"] = proxies
583
+ call_kwargs.update(kwargs)
584
+
585
+ if resolved_provider:
586
+ try:
587
+ provider_instance = self._get_provider_instance(resolved_provider)
588
+ response = provider_instance.chat.completions.create(
589
+ **cast(Dict[str, Any], call_kwargs)
590
+ )
591
+
592
+ if stream and inspect.isgenerator(response):
593
+ try:
594
+ first_chunk = next(response)
595
+ self._last_provider = resolved_provider.__name__
596
+
597
+ def _chained_gen_stream(
598
+ first: ChatCompletionChunk,
599
+ rest: Generator[ChatCompletionChunk, None, None],
600
+ pname: str,
601
+ ) -> Generator[ChatCompletionChunk, None, None]:
602
+ if self._client.print_provider_info:
603
+ print(f"\033[1;34m{pname}:{resolved_model}\033[0m\n")
604
+ yield first
605
+ yield from rest
606
+
607
+ return _chained_gen_stream(
608
+ first_chunk, response, resolved_provider.__name__
609
+ )
610
+ except StopIteration:
611
+ pass
612
+ except Exception:
613
+ pass
614
+ else:
615
+ # Type narrowing for non-streaming response
616
+ if not inspect.isgenerator(response):
617
+ completion_response = cast(ChatCompletion, response)
618
+ if (
619
+ completion_response
620
+ and hasattr(completion_response, "choices")
621
+ and completion_response.choices
622
+ and completion_response.choices[0].message
623
+ and completion_response.choices[0].message.content
624
+ and completion_response.choices[0].message.content.strip()
625
+ ):
626
+ self._last_provider = resolved_provider.__name__
627
+ if self._client.print_provider_info:
628
+ print(
629
+ f"\033[1;34m{resolved_provider.__name__}:{resolved_model}\033[0m\n"
630
+ )
631
+ return completion_response
632
+ else:
633
+ raise ValueError(
634
+ f"Provider {resolved_provider.__name__} returned empty content"
635
+ )
636
+ except Exception:
637
+ pass
638
+
639
+ all_available = self._get_available_providers()
640
+ tier1, tier2, tier3 = [], [], []
641
+ base_model = model.split("/")[-1] if "/" in model else model
642
+ search_models = {base_model, resolved_model} if resolved_model else {base_model}
643
+
644
+ for p_name, p_cls in all_available:
645
+ if p_cls == resolved_provider:
646
+ continue
647
+
648
+ p_models = _get_models_safely(p_cls, self._client)
649
+ if not p_models:
650
+ fallback_model = (
651
+ base_model
652
+ if base_model != "auto"
653
+ else (p_models[0] if p_models else base_model)
654
+ )
655
+ tier3.append((p_name, p_cls, fallback_model))
656
+ continue
657
+
658
+ found_exact = False
659
+ for sm in search_models:
660
+ if sm != "auto" and sm in p_models:
661
+ tier1.append((p_name, p_cls, sm))
662
+ found_exact = True
663
+ break
664
+ if found_exact:
665
+ continue
666
+
667
+ if base_model != "auto":
668
+ matches = difflib.get_close_matches(base_model, p_models, n=1, cutoff=0.5)
669
+ if matches:
670
+ tier2.append((p_name, p_cls, matches[0]))
671
+ continue
672
+
673
+ tier3.append((p_name, p_cls, random.choice(p_models)))
674
+
675
+ random.shuffle(tier1)
676
+ random.shuffle(tier2)
677
+ random.shuffle(tier3)
678
+ fallback_queue = tier1 + tier2 + tier3
679
+
680
+ errors = []
681
+ for p_name, p_cls, p_model in fallback_queue:
682
+ try:
683
+ provider_instance = self._get_provider_instance(p_cls)
684
+ fallback_kwargs = cast(
685
+ Dict[str, Any], {**call_kwargs, "model": p_model}
686
+ )
687
+ response = provider_instance.chat.completions.create(**fallback_kwargs)
688
+
689
+ if stream and inspect.isgenerator(response):
690
+ try:
691
+ first_chunk = next(response)
692
+ self._last_provider = p_name
693
+
694
+ def _chained_gen_fallback(first, rest, pname, mname):
695
+ if self._client.print_provider_info:
696
+ print(f"\033[1;34m{pname}:{mname} (Fallback)\033[0m\n")
697
+ yield first
698
+ yield from rest
699
+
700
+ return _chained_gen_fallback(first_chunk, response, p_name, p_model)
701
+ except (StopIteration, Exception):
702
+ continue
703
+
704
+ if not inspect.isgenerator(response):
705
+ completion_response = cast(ChatCompletion, response)
706
+ if (
707
+ completion_response
708
+ and hasattr(completion_response, "choices")
709
+ and completion_response.choices
710
+ and completion_response.choices[0].message
711
+ and completion_response.choices[0].message.content
712
+ and completion_response.choices[0].message.content.strip()
713
+ ):
714
+ self._last_provider = p_name
715
+ if self._client.print_provider_info:
716
+ print(f"\033[1;34m{p_name}:{p_model} (Fallback)\033[0m\n")
717
+ return completion_response
718
+ else:
719
+ errors.append(f"{p_name}: Returned empty response.")
720
+ continue
721
+ except Exception as e:
722
+ errors.append(f"{p_name}: {str(e)}")
723
+ continue
724
+
725
+ raise RuntimeError(f"All chat providers failed. Errors: {'; '.join(errors[:3])}")
726
+
727
+
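# --- Editor's note, not part of the diff: per the create() docstring above, an
# exact provider can be pinned with the "Provider/model" form; the provider name
# comes from the class name, and the model id here is hypothetical.
from webscout.client import Client

client = Client()
pinned = client.chat.completions.create(
    model="Deepinfra/meta-llama/Meta-Llama-3.1-8B-Instruct",
    messages=[{"role": "user", "content": "One-line summary, please."}],
)
print(client.chat.completions.last_provider)  # provider actually used, e.g. after failover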
728
+ class ClientChat(BaseChat):
729
+ """
730
+ Standard chat interface wrapper for the Client.
731
+
732
+ Provides access to chat completions through a completions property that
733
+ implements the BaseChat interface. Acts as an adapter between the Client
734
+ and the underlying OpenAI-compatible completion system.
735
+
736
+ Attributes:
737
+ completions: ClientCompletions instance for creating chat completions.
738
+
739
+ Examples:
740
+ >>> chat = client.chat
741
+ >>> response = chat.completions.create(
742
+ ... model="auto",
743
+ ... messages=[{"role": "user", "content": "Hi"}]
744
+ ... )
745
+ """
746
+
747
+ def __init__(self, client: "Client"):
748
+ self.completions = ClientCompletions(client)
749
+
750
+
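# --- Editor's note, not part of the diff: the ClientImages class below exposes
# images.generate(); going only by the docstring example in that class (parameter
# names taken from it, availability not verified):
from webscout.client import Client

client = Client()
img = client.images.generate(
    prompt="A beautiful sunset",
    model="auto",
    n=1,
    size="1024x1024",
)
print(img.data[0].url)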
751
+ class ClientImages(BaseImages):
752
+ """
753
+ Unified image generation interface with automatic provider selection and failover.
754
+
755
+ Manages text-to-image (TTI) generation by automatically selecting appropriate
756
+ providers and models based on user input. Implements similar resolution and
757
+ failover logic as ClientCompletions but for image generation.
758
+
759
+ Features:
760
+ - Automatic model discovery and fuzzy matching
761
+ - Provider failover for reliability
762
+ - Provider and model caching for performance
763
+ - Structured parameter validation
764
+ - Support for multiple image output formats
765
+
766
+ Attributes:
767
+ _client: Reference to the parent Client instance.
768
+ _last_provider: Name of the last successfully used image provider.
769
+
770
+ Examples:
771
+ >>> client = Client(print_provider_info=True)
772
+ >>> response = client.images.generate(
773
+ ... prompt="A beautiful sunset",
774
+ ... model="auto",
775
+ ... n=1,
776
+ ... size="1024x1024"
777
+ ... )
778
+ >>> print(response.data[0].url)
779
+ """
780
+
781
+ def __init__(self, client: "Client"):
782
+ self._client = client
783
+ self._last_provider: Optional[str] = None
784
+
785
+ @property
786
+ def last_provider(self) -> Optional[str]:
787
+ """
788
+ Returns the name of the last successfully used image provider.
789
+
790
+ Tracks which TTI provider was most recently used to generate images.
791
+ Useful for debugging and understanding which fallback providers are
792
+ being utilized.
793
+
794
+ Returns:
795
+ The name of the last provider as a string, or None if no provider
796
+ has been successfully used yet.
797
+
798
+ Examples:
799
+ >>> images = client.images
800
+ >>> response = images.generate(prompt="...", model="auto")
801
+ >>> print(images.last_provider)
802
+ 'StableDiffusion'
803
+ """
804
+ return self._last_provider
805
+
806
+ def _get_provider_instance(
807
+ self, provider_class: Type[TTICompatibleProvider], **kwargs
808
+ ) -> TTICompatibleProvider:
809
+ """
810
+ Retrieves or creates a cached TTI provider instance.
811
+
812
+ Checks if a TTI provider instance already exists in the client's cache.
813
+ If not, initializes a new instance with client-level configuration
814
+ (proxies) merged with any additional kwargs.
815
+
816
+ Args:
817
+ provider_class: The TTI-compatible provider class to instantiate.
818
+ **kwargs: Additional keyword arguments to pass to the provider's constructor.
819
+
820
+ Returns:
821
+ An instantiated and initialized TTI provider instance.
822
+
823
+ Raises:
824
+ RuntimeError: If the provider cannot be initialized with or without
825
+ client configuration.
826
+
827
+ Examples:
828
+ >>> from webscout.Provider.TTI.dalle import DALLE
829
+ >>> images = client.images
830
+ >>> instance = images._get_provider_instance(DALLE)
831
+ """
832
+ p_name = provider_class.__name__
833
+ if p_name in self._client._provider_cache:
834
+ return self._client._provider_cache[p_name]
835
+
836
+ init_kwargs = {}
837
+ if self._client.proxies:
838
+ init_kwargs["proxies"] = self._client.proxies
839
+ init_kwargs.update(kwargs)
840
+
841
+ try:
842
+ instance = provider_class(**init_kwargs)
843
+ self._client._provider_cache[p_name] = instance
844
+ return instance
845
+ except Exception:
846
+ try:
847
+ instance = provider_class()
848
+ self._client._provider_cache[p_name] = instance
849
+ return instance
850
+ except Exception as e:
851
+ raise RuntimeError(
852
+ f"Failed to initialize TTI provider {provider_class.__name__}: {e}"
853
+ )
854
+
855
+ def _fuzzy_resolve_provider_and_model(
856
+ self, model: str
857
+ ) -> Optional[Tuple[Type[TTICompatibleProvider], str]]:
858
+ """
859
+ Performs fuzzy matching to find the closest image model match across providers.
860
+
861
+ Attempts three levels of matching:
862
+ 1. Exact case-insensitive match
863
+ 2. Substring match (model contains query or vice versa)
864
+ 3. Fuzzy match using difflib with 50% cutoff
865
+
866
+ Args:
867
+ model: The model name or partial name to search for.
868
+
869
+ Returns:
870
+ A tuple of (provider_class, resolved_model_name) if a match is found,
871
+ or None if no suitable match is found.
872
+
873
+ Note:
874
+ Prints informational messages if client.print_provider_info is enabled.
875
+
876
+ Examples:
877
+ >>> result = images._fuzzy_resolve_provider_and_model("dall-e")
878
+ >>> if result:
879
+ ... provider_cls, model_name = result
880
+ ... print(f"Found: {model_name} via {provider_cls.__name__}")
881
+ """
882
+ available = self._get_available_providers()
883
+ model_to_provider = {}
884
+
885
+ for p_name, p_cls in available:
886
+ p_models = _get_models_safely(p_cls, self._client)
887
+ for m in p_models:
888
+ if m not in model_to_provider:
889
+ model_to_provider[m] = p_cls
890
+
891
+ if not model_to_provider:
892
+ return None
893
+
894
+ # 1. Exact match
895
+ for m_name in model_to_provider:
896
+ if m_name.lower() == model.lower():
897
+ return model_to_provider[m_name], m_name
898
+
899
+ # 2. Substring match
900
+ for m_name in model_to_provider:
901
+ if model.lower() in m_name.lower() or m_name.lower() in model.lower():
902
+ if self._client.print_provider_info:
903
+ print(f"\033[1;33mSubstring match (TTI): '{model}' -> '{m_name}'\033[0m")
904
+ return model_to_provider[m_name], m_name
905
+
906
+ # 3. Fuzzy match
907
+ matches = difflib.get_close_matches(model, model_to_provider.keys(), n=1, cutoff=0.5)
908
+ if matches:
909
+ matched_model = matches[0]
910
+ if self._client.print_provider_info:
911
+ print(f"\033[1;33mFuzzy match (TTI): '{model}' -> '{matched_model}'\033[0m")
912
+ return model_to_provider[matched_model], matched_model
913
+ return None
914
+
915
+ def _resolve_provider_and_model(
916
+ self, model: str, provider: Optional[Type[TTICompatibleProvider]]
917
+ ) -> Tuple[Type[TTICompatibleProvider], str]:
918
+ """
919
+ Resolves the best TTI provider and model name based on input specifications.
920
+
921
+ Handles multiple input formats:
922
+ - "provider/model" format: Parses and resolves to exact provider
923
+ - "auto": Randomly selects an available provider and model
924
+ - Named model: Searches across all providers for exact or fuzzy match
925
+
926
+ Resolution strategy:
927
+ 1. If "provider/model" format, find provider by name
928
+ 2. If provider specified, use it with given or auto-selected model
929
+ 3. If "auto", randomly select from available providers and models
930
+ 4. Otherwise, search across providers for exact match
931
+ 5. Fall back to fuzzy matching
932
+ 6. Finally, randomly select from available providers
933
+
934
+ Args:
935
+ model: Model identifier. Can be "auto", "provider/model", or model name.
936
+ provider: Optional TTI provider class to constrain resolution.
937
+
938
+ Returns:
939
+ A tuple of (provider_class, resolved_model_name).
940
+
941
+ Raises:
942
+ RuntimeError: If no providers are available or model cannot be resolved.
943
+
944
+ Examples:
945
+ >>> # Auto resolution
946
+ >>> p_cls, m_name = images._resolve_provider_and_model("auto", None)
947
+ >>> # Explicit provider/model
948
+ >>> p_cls, m_name = images._resolve_provider_and_model(
949
+ ... "StableDiffusion/stable-diffusion-v1-5", None
950
+ ... )
951
+ """
952
+ if "/" in model:
953
+ p_name, m_name = model.split("/", 1)
954
+ found_p = next(
955
+ (cls for name, cls in TTI_PROVIDERS.items() if name.lower() == p_name.lower()), None
956
+ )
957
+ if found_p:
958
+ return found_p, m_name
959
+
960
+ if provider:
961
+ resolved_model = model
962
+ if model == "auto":
963
+ p_models = _get_models_safely(provider, self._client)
964
+ if p_models:
965
+ resolved_model = random.choice(p_models)
966
+ else:
967
+ raise RuntimeError(f"TTI Provider {provider.__name__} has no available models.")
968
+ return provider, resolved_model
969
+
970
+ if model == "auto":
971
+ available = self._get_available_providers()
972
+ if not available:
973
+ raise RuntimeError("No available image providers found.")
974
+
975
+ providers_with_models = []
976
+ for name, cls in available:
977
+ p_models = _get_models_safely(cls, self._client)
978
+ if p_models:
979
+ providers_with_models.append((cls, p_models))
980
+
981
+ if providers_with_models:
982
+ p_cls, p_models = random.choice(providers_with_models)
983
+ return p_cls, random.choice(p_models)
984
+ else:
985
+ raise RuntimeError("No available image providers with models found.")
986
+
987
+ available = self._get_available_providers()
988
+ for p_name, p_cls in available:
989
+ p_models = _get_models_safely(p_cls, self._client)
990
+ if p_models and model in p_models:
991
+ return p_cls, model
992
+
993
+ fuzzy_result = self._fuzzy_resolve_provider_and_model(model)
994
+ if fuzzy_result:
995
+ return fuzzy_result
996
+
997
+ if available:
998
+ random.shuffle(available)
999
+ return available[0][1], model
1000
+ raise RuntimeError(f"No image providers found for model '{model}'")
1001
+
1002
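Note that the "provider/model" form is resolved with a single split on the first slash, so model identifiers that themselves contain slashes are preserved. A small sketch (names are illustrative only):

    spec = "ExampleProvider/some-org/some-model"
    p_name, m_name = spec.split("/", 1)
    print(p_name)  # ExampleProvider
    print(m_name)  # some-org/some-model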
+ def _get_available_providers(self) -> List[Tuple[str, Type[TTICompatibleProvider]]]:
1003
+ """
1004
+ Returns a list of available image providers for the current client configuration.
1005
+
1006
+ Filters the global TTI provider registry based on:
1007
+ - Client's exclude_images list
1008
+ - API key availability (if api_key is set, includes auth-required providers)
1009
+
1010
+ Returns:
1011
+ A list of tuples containing (provider_name, provider_class) pairs
1012
+ for all available image providers.
1013
+
1014
+ Examples:
1015
+ >>> providers = images._get_available_providers()
1016
+ >>> print([p[0] for p in providers])
1017
+ ['StableDiffusion', 'DALL-E', 'Midjourney']
1018
+ """
1019
+ exclude = set(self._client.exclude_images or [])
1020
+ if self._client.api_key:
1021
+ return [(name, cls) for name, cls in TTI_PROVIDERS.items() if name not in exclude]
1022
+ return [
1023
+ (name, cls)
1024
+ for name, cls in TTI_PROVIDERS.items()
1025
+ if name not in TTI_AUTH_REQUIRED and name not in exclude
1026
+ ]
1027
+
1028
+ def generate(
1029
+ self,
1030
+ *,
1031
+ prompt: str,
1032
+ model: str = "auto",
1033
+ n: int = 1,
1034
+ size: str = "1024x1024",
1035
+ response_format: str = "url",
1036
+ provider: Optional[Type[TTICompatibleProvider]] = None,
1037
+ **kwargs: Any,
1038
+ ) -> ImageResponse:
1039
+ """
1040
+ Generates images with automatic provider selection and failover.
1041
+
1042
+ Attempts to resolve the specified model to a provider and model name,
1043
+ then creates images. If the initial attempt fails, automatically falls
1044
+ back to other available providers, prioritizing:
1045
+ 1. Providers with exact model matches
1046
+ 2. Providers with fuzzy model matches
1047
+ 3. Providers with any available model
1048
+
1049
+ Args:
1050
+ prompt: Text description of the image(s) to generate. Required.
1051
+ model: Model identifier. Default "auto" randomly selects available models.
1052
+ Can be "provider/model" format or model name.
1053
+ n: Number of images to generate. Default is 1.
1054
+ size: Image size specification (e.g., "1024x1024", "512x512"). Default is "1024x1024".
1055
+ response_format: Format for image response ("url" or "b64_json"). Default is "url".
1056
+ provider: Specific TTI provider class to use. Optional.
1057
+ **kwargs: Additional arguments passed to the provider.
1058
+
1059
+ Returns:
1060
+ ImageResponse object containing generated images with URLs or base64 data.
1061
+
1062
+ Raises:
1063
+ RuntimeError: If all image providers fail or no providers are available.
1064
+
1065
+ Note:
1066
+ If print_provider_info is True, provider name and model are printed
1067
+ to stdout in color-formatted text.
1068
+
1069
+ Examples:
1070
+ >>> client = Client(print_provider_info=True)
1071
+ >>> response = client.images.generate(
1072
+ ... prompt="A beautiful sunset over mountains",
1073
+ ... model="auto",
1074
+ ... n=1,
1075
+ ... size="1024x1024"
1076
+ ... )
1077
+ >>> print(response.data[0].url)
1078
+
1079
+ >>> # Using specific provider
1080
+ >>> from webscout.Provider.TTI.stable import StableDiffusion
1081
+ >>> response = client.images.generate(
1082
+ ... prompt="A cat wearing sunglasses",
1083
+ ... provider=StableDiffusion
1084
+ ... )
1085
+ """
1086
+ try:
1087
+ resolved_provider, resolved_model = self._resolve_provider_and_model(model, provider)
1088
+ except Exception:
1089
+ resolved_provider, resolved_model = None, model
1090
+
1091
+ call_kwargs = {
1092
+ "prompt": prompt,
1093
+ "model": resolved_model,
1094
+ "n": n,
1095
+ "size": size,
1096
+ "response_format": response_format,
1097
+ }
1098
+ call_kwargs.update(kwargs)
1099
+
1100
+ if resolved_provider:
1101
+ try:
1102
+ provider_instance = self._get_provider_instance(resolved_provider)
1103
+ response = provider_instance.images.create(
1104
+ **cast(Dict[str, Any], call_kwargs)
1105
+ )
1106
+ self._last_provider = resolved_provider.__name__
1107
+ if self._client.print_provider_info:
1108
+ print(f"\033[1;34m{resolved_provider.__name__}:{resolved_model}\033[0m\n")
1109
+ return response
1110
+ except Exception:
1111
+ pass
1112
+
1113
+ all_available = self._get_available_providers()
1114
+ tier1, tier2, tier3 = [], [], []
1115
+ base_model = model.split("/")[-1] if "/" in model else model
1116
+ search_models = {base_model, resolved_model} if resolved_model else {base_model}
1117
+
1118
+ for p_name, p_cls in all_available:
1119
+ if p_cls == resolved_provider:
1120
+ continue
1121
+
1122
+ p_models = _get_models_safely(p_cls, self._client)
1123
+ if not p_models:
1124
+ # p_models is empty on this branch, so the requested base model is the only sensible fallback
+ fallback_model = base_model
1129
+ tier3.append((p_name, p_cls, fallback_model))
1130
+ continue
1131
+
1132
+ found_exact = False
1133
+ for sm in search_models:
1134
+ if sm != "auto" and sm in p_models:
1135
+ tier1.append((p_name, p_cls, sm))
1136
+ found_exact = True
1137
+ break
1138
+ if found_exact:
1139
+ continue
1140
+
1141
+ if base_model != "auto":
1142
+ matches = difflib.get_close_matches(base_model, p_models, n=1, cutoff=0.5)
1143
+ if matches:
1144
+ tier2.append((p_name, p_cls, matches[0]))
1145
+ continue
1146
+
1147
+ tier3.append((p_name, p_cls, random.choice(p_models)))
1148
+
1149
+ random.shuffle(tier1)
1150
+ random.shuffle(tier2)
1151
+ random.shuffle(tier3)
1152
+ fallback_queue = tier1 + tier2 + tier3
1153
+
1154
+ for p_name, p_cls, p_model in fallback_queue:
1155
+ try:
1156
+ provider_instance = self._get_provider_instance(p_cls)
1157
+ fallback_kwargs = cast(
1158
+ Dict[str, Any], {**call_kwargs, "model": p_model}
1159
+ )
1160
+ response = provider_instance.images.create(**fallback_kwargs)
1161
+ self._last_provider = p_name
1162
+ if self._client.print_provider_info:
1163
+ print(f"\033[1;34m{p_name}:{p_model} (Fallback)\033[0m\n")
1164
+ return response
1165
+ except Exception:
1166
+ continue
1167
+ raise RuntimeError("All image providers failed.")
1168
+
1169
+ def create(self, **kwargs) -> ImageResponse:
1170
+ """
1171
+ Alias for generate() method.
1172
+
1173
+ Provides compatibility with OpenAI-style image API where create() is
1174
+ the standard method name for image generation.
1175
+
1176
+ Args:
1177
+ **kwargs: All arguments accepted by generate().
1178
+
1179
+ Returns:
1180
+ ImageResponse object containing generated images.
1181
+
1182
+ Examples:
1183
+ >>> response = client.images.create(
1184
+ ... prompt="A robot painting a picture",
1185
+ ... model="auto"
1186
+ ... )
1187
+ """
1188
+ return self.generate(**kwargs)
1189
+
1190
+
1191
+ class Client:
1192
+ """
1193
+ Unified Webscout Client for AI chat and image generation.
1194
+
1195
+ A high-level client that provides a single interface for interacting with
1196
+ multiple AI providers (chat completions and image generation). Automatically
1197
+ selects, caches, and fails over between providers based on availability
1198
+ and model support.
1199
+
1200
+ This client aims to provide a seamless, provider-agnostic experience by:
1201
+ - Supporting automatic provider selection and fallback
1202
+ - Caching provider instances for performance
1203
+ - Offering intelligent model resolution (auto, provider/model, or model name)
1204
+ - Handling authentication across multiple providers
1205
+ - Providing detailed provider information when enabled
1206
+
1207
+ Attributes:
1208
+ provider: Optional default provider for chat completions.
1209
+ image_provider: Optional default provider for image generation.
1210
+ api_key: Optional API key for providers that support authentication.
1211
+ proxies: HTTP proxy configuration dictionary.
1212
+ exclude: List of provider names to exclude from chat completions.
1213
+ exclude_images: List of provider names to exclude from image generation.
1214
+ print_provider_info: Whether to print selected provider and model info.
1215
+ chat: ClientChat instance for chat completions.
1216
+ images: ClientImages instance for image generation.
1217
+
1218
+ Examples:
1219
+ >>> # Basic usage with automatic provider selection
1220
+ >>> client = Client()
1221
+ >>> response = client.chat.completions.create(
1222
+ ... model="auto",
1223
+ ... messages=[{"role": "user", "content": "Hello!"}]
1224
+ ... )
1225
+ >>> print(response.choices[0].message.content)
1226
+
1227
+ >>> # With provider information and image generation
1228
+ >>> client = Client(print_provider_info=True)
1229
+ >>> chat_response = client.chat.completions.create(
1230
+ ... model="gpt-4",
1231
+ ... messages=[{"role": "user", "content": "Describe an image"}]
1232
+ ... )
1233
+ >>> image_response = client.images.generate(
1234
+ ... prompt="A sunset over mountains",
1235
+ ... model="auto"
1236
+ ... )
1237
+
1238
+ >>> # Excluding certain providers and using API key
1239
+ >>> client = Client(
1240
+ ... api_key="your-api-key-here",
1241
+ ... exclude=["BadProvider"],
1242
+ ... exclude_images=["SlowProvider"]
1243
+ ... )
1244
+ """
1245
+
1246
+ def __init__(
1247
+ self,
1248
+ provider: Optional[Type[OpenAICompatibleProvider]] = None,
1249
+ image_provider: Optional[Type[TTICompatibleProvider]] = None,
1250
+ api_key: Optional[str] = None,
1251
+ proxies: Optional[dict] = None,
1252
+ exclude: Optional[List[str]] = None,
1253
+ exclude_images: Optional[List[str]] = None,
1254
+ print_provider_info: bool = False,
1255
+ **kwargs: Any,
1256
+ ):
1257
+ """
1258
+ Initialize the Webscout Client with optional configuration.
1259
+
1260
+ Args:
1261
+ provider: Default provider class for chat completions. If specified,
1262
+ this provider is prioritized in provider resolution. Optional.
1263
+ image_provider: Default provider class for image generation. If specified,
1264
+ this provider is prioritized in image resolution. Optional.
1265
+ api_key: API key for authenticated providers. If provided, enables access
1266
+ to providers that require authentication. Optional.
1267
+ proxies: Dictionary of proxy settings (e.g., {"http": "http://proxy:8080"}).
1268
+ Applied to all provider requests. Optional.
1269
+ exclude: List of provider names to exclude from chat completion selection.
1270
+ Names are case-insensitive. Optional.
1271
+ exclude_images: List of provider names to exclude from image generation selection.
1272
+ Names are case-insensitive. Optional.
1273
+ print_provider_info: If True, prints selected provider name and model to stdout
1274
+ for each request. Useful for debugging. Default is False.
1275
+ **kwargs: Additional keyword arguments stored for future use.
1276
+
1277
+ Examples:
1278
+ >>> # Minimal setup - use default providers
1279
+ >>> client = Client()
1280
+
1281
+ >>> # With authentication and custom settings
1282
+ >>> client = Client(
1283
+ ... api_key="sk-1234567890abcdef",
1284
+ ... proxies={"http": "http://proxy.example.com:8080"},
1285
+ ... exclude=["UnreliableProvider"],
1286
+ ... print_provider_info=True
1287
+ ... )
1288
+
1289
+ >>> # With specific default providers
1290
+ >>> from webscout.Provider.OPENAI.groq import Groq
1291
+ >>> from webscout.Provider.TTI.stable import StableDiffusion
1292
+ >>> client = Client(
1293
+ ... provider=Groq,
1294
+ ... image_provider=StableDiffusion
1295
+ ... )
1296
+ """
1297
+ self.provider = provider
1298
+ self.image_provider = image_provider
1299
+ self.api_key = api_key
1300
+ self.proxies = proxies or {}
1301
+ self.exclude = [e.upper() if e else e for e in (exclude or [])]
1302
+ self.exclude_images = [e.upper() if e else e for e in (exclude_images or [])]
1303
+ self.print_provider_info = print_provider_info
1304
+ self.kwargs = kwargs
1305
+
1306
+ self._provider_cache = {}
1307
+ self.chat = ClientChat(self)
1308
+ self.images = ClientImages(self)
1309
+
1310
+ @staticmethod
1311
+ def get_chat_providers() -> List[str]:
1312
+ """
1313
+ Returns a list of all available chat provider names.
1314
+
1315
+ Queries the global OPENAI_PROVIDERS registry that is populated
1316
+ at module load time. Names are not normalized and appear as
1317
+ defined in their respective classes.
1318
+
1319
+ Returns:
1320
+ List of provider class names available for chat completions.
1321
+
1322
+ Examples:
1323
+ >>> providers = Client.get_chat_providers()
1324
+ >>> print("GPT4Free" in providers)
1325
+ True
1326
+ >>> print(len(providers))
1327
+ 42
1328
+ """
1329
+ return list(OPENAI_PROVIDERS.keys())
1330
+
1331
+ @staticmethod
1332
+ def get_image_providers() -> List[str]:
1333
+ """
1334
+ Returns a list of all available image provider names.
1335
+
1336
+ Queries the global TTI_PROVIDERS registry that is populated
1337
+ at module load time. Names are not normalized and appear as
1338
+ defined in their respective classes.
1339
+
1340
+ Returns:
1341
+ List of provider class names available for image generation.
1342
+
1343
+ Examples:
1344
+ >>> providers = Client.get_image_providers()
1345
+ >>> print("StableDiffusion" in providers)
1346
+ True
1347
+ >>> print(len(providers))
1348
+ 8
1349
+ """
1350
+ return list(TTI_PROVIDERS.keys())
1351
+
1352
+ @staticmethod
1353
+ def get_free_chat_providers() -> List[str]:
1354
+ """
1355
+ Returns a list of chat providers that don't require authentication.
1356
+
1357
+ Filters the global OPENAI_PROVIDERS registry to include only providers
1358
+ where required_auth is False. These providers can be used without
1359
+ an API key.
1360
+
1361
+ Returns:
1362
+ List of free chat provider class names.
1363
+
1364
+ Examples:
1365
+ >>> free_providers = Client.get_free_chat_providers()
1366
+ >>> print("GPT4Free" in free_providers)
1367
+ True
1368
+ >>> print(len(free_providers))
1369
+ 35
1370
+ """
1371
+ return [name for name in OPENAI_PROVIDERS.keys() if name not in OPENAI_AUTH_REQUIRED]
1372
+
1373
+ @staticmethod
1374
+ def get_free_image_providers() -> List[str]:
1375
+ """
1376
+ Returns a list of image providers that don't require authentication.
1377
+
1378
+ Filters the global TTI_PROVIDERS registry to include only providers
1379
+ where required_auth is False. These providers can be used without
1380
+ an API key.
1381
+
1382
+ Returns:
1383
+ List of free image provider class names.
1384
+
1385
+ Examples:
1386
+ >>> free_providers = Client.get_free_image_providers()
1387
+ >>> print("StableDiffusion" in free_providers)
1388
+ True
1389
+ >>> print(len(free_providers))
1390
+ 6
1391
+ """
1392
+ return [name for name in TTI_PROVIDERS.keys() if name not in TTI_AUTH_REQUIRED]
1393
+
1394
+
1395
+ try:
1396
+ from webscout.server.server import run_api as _run_api_impl
1397
+ from webscout.server.server import run_api as _start_server_impl
1398
+
1399
+ def run_api(*args: Any, **kwargs: Any) -> Any:
1400
+ """
1401
+ Runs the FastAPI OpenAI-compatible API server.
1402
+
1403
+ Delegates to webscout.server.server.run_api to start an OpenAI-compatible
1404
+ HTTP API server that provides chat and image endpoints. Requires the
1405
+ 'api' optional dependencies to be installed.
1406
+
1407
+ Args:
1408
+ *args: Positional arguments passed to the underlying run_api implementation.
1409
+ **kwargs: Keyword arguments passed to the underlying run_api implementation.
1410
+ Common options include host, port, debug, and reload.
1411
+
1412
+ Returns:
1413
+ The return value from the underlying FastAPI run function.
1414
+
1415
+ Raises:
1416
+ ImportError: If webscout.server.server is not available.
1417
+
1418
+ Examples:
1419
+ >>> from webscout.client import run_api
1420
+ >>> run_api(host="0.0.0.0", port=8000)
1421
+ """
1422
+ return _run_api_impl(*args, **kwargs)
1423
+
1424
+ def start_server(*args: Any, **kwargs: Any) -> Any:
1425
+ """
1426
+ Starts the FastAPI OpenAI-compatible API server.
1427
+
1428
+ Delegates to webscout.server.server.run_api (bound here as _start_server_impl) to initialize and run
1429
+ an OpenAI-compatible HTTP API server. This is typically the main entry
1430
+ point for starting the webscout server in production or development.
1431
+
1432
+ Args:
1433
+ *args: Positional arguments passed to the underlying start_server implementation.
1434
+ **kwargs: Keyword arguments passed to the underlying start_server implementation.
1435
+ Common options include host, port, workers, and config paths.
1436
+
1437
+ Returns:
1438
+ The return value from the underlying server implementation.
1439
+
1440
+ Raises:
1441
+ ImportError: If webscout.server.server is not available.
1442
+
1443
+ Examples:
1444
+ >>> from webscout.client import start_server
1445
+ >>> start_server()
1446
+ """
1447
+ return _start_server_impl(*args, **kwargs)
1448
+
1449
+ except ImportError:
1450
+
1451
+ def run_api(*args: Any, **kwargs: Any) -> Any:
1452
+ """
1453
+ Runs the FastAPI OpenAI-compatible API server.
1454
+
1455
+ Raises ImportError if the server module is not available.
1456
+ Install with: pip install webscout[api]
1457
+
1458
+ Raises:
1459
+ ImportError: Always raised; server not available in current environment.
1460
+ """
1461
+ raise ImportError("webscout.server.server.run_api is not available.")
1462
+
1463
+ def start_server(*args: Any, **kwargs: Any) -> Any:
1464
+ """
1465
+ Starts the FastAPI OpenAI-compatible API server.
1466
+
1467
+ Raises ImportError if the server module is not available.
1468
+ Install with: pip install webscout[api]
1469
+
1470
+ Raises:
1471
+ ImportError: Always raised; server not available in current environment.
1472
+ """
1473
+ raise ImportError("webscout.server.server.start_server is not available.")
1474
+
1475
+
1476
+ if __name__ == "__main__":
1477
+ client = Client(print_provider_info=True)
1478
+ print("Testing auto resolution...")
1479
+ try:
1480
+ response = client.chat.completions.create(
1481
+ model="auto", messages=[{"role": "user", "content": "Hi"}]
1482
+ )
1483
+ if not inspect.isgenerator(response):
1484
+ completion = cast(ChatCompletion, response)
1485
+ if (
1486
+ completion
1487
+ and completion.choices
1488
+ and completion.choices[0].message
1489
+ and completion.choices[0].message.content
1490
+ ):
1491
+ print(f"Auto Result: {completion.choices[0].message.content[:50]}...")
1492
+ else:
1493
+ print("Auto Result: Empty response")
1494
+ else:
1495
+ print("Streaming response received")
1496
+ except Exception as e:
1497
+ print(f"Error: {e}")