webscout-8.2.9-py3-none-any.whl → webscout-2026.1.19-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (413)
  1. webscout/AIauto.py +524 -251
  2. webscout/AIbase.py +247 -319
  3. webscout/AIutel.py +68 -703
  4. webscout/Bard.py +1072 -1026
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/GitToolkit/gitapi/__init__.py +20 -12
  7. webscout/Extra/GitToolkit/gitapi/gist.py +142 -0
  8. webscout/Extra/GitToolkit/gitapi/organization.py +91 -0
  9. webscout/Extra/GitToolkit/gitapi/repository.py +308 -195
  10. webscout/Extra/GitToolkit/gitapi/search.py +162 -0
  11. webscout/Extra/GitToolkit/gitapi/trending.py +236 -0
  12. webscout/Extra/GitToolkit/gitapi/user.py +128 -96
  13. webscout/Extra/GitToolkit/gitapi/utils.py +82 -62
  14. webscout/Extra/YTToolkit/README.md +443 -375
  15. webscout/Extra/YTToolkit/YTdownloader.py +953 -957
  16. webscout/Extra/YTToolkit/__init__.py +3 -3
  17. webscout/Extra/YTToolkit/transcriber.py +595 -476
  18. webscout/Extra/YTToolkit/ytapi/README.md +230 -44
  19. webscout/Extra/YTToolkit/ytapi/__init__.py +22 -6
  20. webscout/Extra/YTToolkit/ytapi/captions.py +190 -0
  21. webscout/Extra/YTToolkit/ytapi/channel.py +302 -307
  22. webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
  23. webscout/Extra/YTToolkit/ytapi/extras.py +178 -118
  24. webscout/Extra/YTToolkit/ytapi/hashtag.py +120 -0
  25. webscout/Extra/YTToolkit/ytapi/https.py +89 -88
  26. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
  27. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -59
  28. webscout/Extra/YTToolkit/ytapi/pool.py +8 -8
  29. webscout/Extra/YTToolkit/ytapi/query.py +143 -40
  30. webscout/Extra/YTToolkit/ytapi/shorts.py +122 -0
  31. webscout/Extra/YTToolkit/ytapi/stream.py +68 -63
  32. webscout/Extra/YTToolkit/ytapi/suggestions.py +97 -0
  33. webscout/Extra/YTToolkit/ytapi/utils.py +66 -62
  34. webscout/Extra/YTToolkit/ytapi/video.py +403 -232
  35. webscout/Extra/__init__.py +2 -3
  36. webscout/Extra/gguf.py +1298 -684
  37. webscout/Extra/tempmail/README.md +487 -487
  38. webscout/Extra/tempmail/__init__.py +28 -28
  39. webscout/Extra/tempmail/async_utils.py +143 -141
  40. webscout/Extra/tempmail/base.py +172 -161
  41. webscout/Extra/tempmail/cli.py +191 -187
  42. webscout/Extra/tempmail/emailnator.py +88 -84
  43. webscout/Extra/tempmail/mail_tm.py +378 -361
  44. webscout/Extra/tempmail/temp_mail_io.py +304 -292
  45. webscout/Extra/weather.py +196 -194
  46. webscout/Extra/weather_ascii.py +17 -15
  47. webscout/Provider/AISEARCH/PERPLEXED_search.py +175 -0
  48. webscout/Provider/AISEARCH/Perplexity.py +292 -333
  49. webscout/Provider/AISEARCH/README.md +106 -279
  50. webscout/Provider/AISEARCH/__init__.py +16 -9
  51. webscout/Provider/AISEARCH/brave_search.py +298 -0
  52. webscout/Provider/AISEARCH/iask_search.py +357 -410
  53. webscout/Provider/AISEARCH/monica_search.py +200 -220
  54. webscout/Provider/AISEARCH/webpilotai_search.py +242 -255
  55. webscout/Provider/Algion.py +413 -0
  56. webscout/Provider/Andi.py +74 -69
  57. webscout/Provider/Apriel.py +313 -0
  58. webscout/Provider/Ayle.py +323 -0
  59. webscout/Provider/ChatSandbox.py +329 -342
  60. webscout/Provider/ClaudeOnline.py +365 -0
  61. webscout/Provider/Cohere.py +232 -208
  62. webscout/Provider/DeepAI.py +367 -0
  63. webscout/Provider/Deepinfra.py +467 -340
  64. webscout/Provider/EssentialAI.py +217 -0
  65. webscout/Provider/ExaAI.py +274 -261
  66. webscout/Provider/Gemini.py +175 -169
  67. webscout/Provider/GithubChat.py +385 -369
  68. webscout/Provider/Gradient.py +286 -0
  69. webscout/Provider/Groq.py +556 -801
  70. webscout/Provider/HadadXYZ.py +323 -0
  71. webscout/Provider/HeckAI.py +392 -375
  72. webscout/Provider/HuggingFace.py +387 -0
  73. webscout/Provider/IBM.py +340 -0
  74. webscout/Provider/Jadve.py +317 -291
  75. webscout/Provider/K2Think.py +306 -0
  76. webscout/Provider/Koboldai.py +221 -384
  77. webscout/Provider/Netwrck.py +273 -270
  78. webscout/Provider/Nvidia.py +310 -0
  79. webscout/Provider/OPENAI/DeepAI.py +489 -0
  80. webscout/Provider/OPENAI/K2Think.py +423 -0
  81. webscout/Provider/OPENAI/PI.py +463 -0
  82. webscout/Provider/OPENAI/README.md +890 -952
  83. webscout/Provider/OPENAI/TogetherAI.py +405 -0
  84. webscout/Provider/OPENAI/TwoAI.py +255 -357
  85. webscout/Provider/OPENAI/__init__.py +148 -40
  86. webscout/Provider/OPENAI/ai4chat.py +348 -293
  87. webscout/Provider/OPENAI/akashgpt.py +436 -0
  88. webscout/Provider/OPENAI/algion.py +303 -0
  89. webscout/Provider/OPENAI/{exachat.py → ayle.py} +365 -444
  90. webscout/Provider/OPENAI/base.py +253 -249
  91. webscout/Provider/OPENAI/cerebras.py +296 -0
  92. webscout/Provider/OPENAI/chatgpt.py +870 -556
  93. webscout/Provider/OPENAI/chatsandbox.py +233 -173
  94. webscout/Provider/OPENAI/deepinfra.py +403 -322
  95. webscout/Provider/OPENAI/e2b.py +2370 -1414
  96. webscout/Provider/OPENAI/elmo.py +278 -0
  97. webscout/Provider/OPENAI/exaai.py +452 -417
  98. webscout/Provider/OPENAI/freeassist.py +446 -0
  99. webscout/Provider/OPENAI/gradient.py +448 -0
  100. webscout/Provider/OPENAI/groq.py +380 -364
  101. webscout/Provider/OPENAI/hadadxyz.py +292 -0
  102. webscout/Provider/OPENAI/heckai.py +333 -308
  103. webscout/Provider/OPENAI/huggingface.py +321 -0
  104. webscout/Provider/OPENAI/ibm.py +425 -0
  105. webscout/Provider/OPENAI/llmchat.py +253 -0
  106. webscout/Provider/OPENAI/llmchatco.py +378 -335
  107. webscout/Provider/OPENAI/meta.py +541 -0
  108. webscout/Provider/OPENAI/netwrck.py +374 -357
  109. webscout/Provider/OPENAI/nvidia.py +317 -0
  110. webscout/Provider/OPENAI/oivscode.py +348 -287
  111. webscout/Provider/OPENAI/openrouter.py +328 -0
  112. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  113. webscout/Provider/OPENAI/sambanova.py +397 -0
  114. webscout/Provider/OPENAI/sonus.py +305 -304
  115. webscout/Provider/OPENAI/textpollinations.py +370 -339
  116. webscout/Provider/OPENAI/toolbaz.py +375 -413
  117. webscout/Provider/OPENAI/typefully.py +419 -355
  118. webscout/Provider/OPENAI/typliai.py +279 -0
  119. webscout/Provider/OPENAI/utils.py +314 -318
  120. webscout/Provider/OPENAI/wisecat.py +359 -387
  121. webscout/Provider/OPENAI/writecream.py +185 -163
  122. webscout/Provider/OPENAI/x0gpt.py +462 -365
  123. webscout/Provider/OPENAI/zenmux.py +380 -0
  124. webscout/Provider/OpenRouter.py +386 -0
  125. webscout/Provider/Openai.py +337 -496
  126. webscout/Provider/PI.py +443 -429
  127. webscout/Provider/QwenLM.py +346 -254
  128. webscout/Provider/STT/__init__.py +28 -0
  129. webscout/Provider/STT/base.py +303 -0
  130. webscout/Provider/STT/elevenlabs.py +264 -0
  131. webscout/Provider/Sambanova.py +317 -0
  132. webscout/Provider/TTI/README.md +69 -82
  133. webscout/Provider/TTI/__init__.py +37 -7
  134. webscout/Provider/TTI/base.py +147 -64
  135. webscout/Provider/TTI/claudeonline.py +393 -0
  136. webscout/Provider/TTI/magicstudio.py +292 -201
  137. webscout/Provider/TTI/miragic.py +180 -0
  138. webscout/Provider/TTI/pollinations.py +331 -221
  139. webscout/Provider/TTI/together.py +334 -0
  140. webscout/Provider/TTI/utils.py +14 -11
  141. webscout/Provider/TTS/README.md +186 -192
  142. webscout/Provider/TTS/__init__.py +43 -10
  143. webscout/Provider/TTS/base.py +523 -159
  144. webscout/Provider/TTS/deepgram.py +286 -156
  145. webscout/Provider/TTS/elevenlabs.py +189 -111
  146. webscout/Provider/TTS/freetts.py +218 -0
  147. webscout/Provider/TTS/murfai.py +288 -113
  148. webscout/Provider/TTS/openai_fm.py +364 -129
  149. webscout/Provider/TTS/parler.py +203 -111
  150. webscout/Provider/TTS/qwen.py +334 -0
  151. webscout/Provider/TTS/sherpa.py +286 -0
  152. webscout/Provider/TTS/speechma.py +693 -580
  153. webscout/Provider/TTS/streamElements.py +275 -333
  154. webscout/Provider/TTS/utils.py +280 -280
  155. webscout/Provider/TextPollinationsAI.py +331 -308
  156. webscout/Provider/TogetherAI.py +450 -0
  157. webscout/Provider/TwoAI.py +309 -475
  158. webscout/Provider/TypliAI.py +311 -305
  159. webscout/Provider/UNFINISHED/ChatHub.py +219 -209
  160. webscout/Provider/{OPENAI/glider.py → UNFINISHED/ChutesAI.py} +331 -326
  161. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +300 -295
  162. webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +218 -198
  163. webscout/Provider/UNFINISHED/Qodo.py +481 -0
  164. webscout/Provider/{MCPCore.py → UNFINISHED/XenAI.py} +330 -315
  165. webscout/Provider/UNFINISHED/Youchat.py +347 -330
  166. webscout/Provider/UNFINISHED/aihumanizer.py +41 -0
  167. webscout/Provider/UNFINISHED/grammerchecker.py +37 -0
  168. webscout/Provider/UNFINISHED/liner.py +342 -0
  169. webscout/Provider/UNFINISHED/liner_api_request.py +246 -263
  170. webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +231 -224
  171. webscout/Provider/WiseCat.py +256 -233
  172. webscout/Provider/WrDoChat.py +390 -370
  173. webscout/Provider/__init__.py +115 -174
  174. webscout/Provider/ai4chat.py +181 -174
  175. webscout/Provider/akashgpt.py +330 -335
  176. webscout/Provider/cerebras.py +397 -290
  177. webscout/Provider/cleeai.py +236 -213
  178. webscout/Provider/elmo.py +291 -283
  179. webscout/Provider/geminiapi.py +343 -208
  180. webscout/Provider/julius.py +245 -223
  181. webscout/Provider/learnfastai.py +333 -325
  182. webscout/Provider/llama3mitril.py +230 -215
  183. webscout/Provider/llmchat.py +308 -258
  184. webscout/Provider/llmchatco.py +321 -306
  185. webscout/Provider/meta.py +996 -801
  186. webscout/Provider/oivscode.py +332 -309
  187. webscout/Provider/searchchat.py +316 -292
  188. webscout/Provider/sonus.py +264 -258
  189. webscout/Provider/toolbaz.py +359 -353
  190. webscout/Provider/turboseek.py +332 -266
  191. webscout/Provider/typefully.py +262 -202
  192. webscout/Provider/x0gpt.py +332 -299
  193. webscout/__init__.py +31 -39
  194. webscout/__main__.py +5 -5
  195. webscout/cli.py +585 -524
  196. webscout/client.py +1497 -70
  197. webscout/conversation.py +140 -436
  198. webscout/exceptions.py +383 -362
  199. webscout/litagent/__init__.py +29 -29
  200. webscout/litagent/agent.py +492 -455
  201. webscout/litagent/constants.py +60 -60
  202. webscout/models.py +505 -181
  203. webscout/optimizers.py +74 -420
  204. webscout/prompt_manager.py +376 -288
  205. webscout/sanitize.py +1514 -0
  206. webscout/scout/README.md +452 -404
  207. webscout/scout/__init__.py +8 -8
  208. webscout/scout/core/__init__.py +7 -7
  209. webscout/scout/core/crawler.py +330 -210
  210. webscout/scout/core/scout.py +800 -607
  211. webscout/scout/core/search_result.py +51 -96
  212. webscout/scout/core/text_analyzer.py +64 -63
  213. webscout/scout/core/text_utils.py +412 -277
  214. webscout/scout/core/web_analyzer.py +54 -52
  215. webscout/scout/element.py +872 -478
  216. webscout/scout/parsers/__init__.py +70 -69
  217. webscout/scout/parsers/html5lib_parser.py +182 -172
  218. webscout/scout/parsers/html_parser.py +238 -236
  219. webscout/scout/parsers/lxml_parser.py +203 -178
  220. webscout/scout/utils.py +38 -37
  221. webscout/search/__init__.py +47 -0
  222. webscout/search/base.py +201 -0
  223. webscout/search/bing_main.py +45 -0
  224. webscout/search/brave_main.py +92 -0
  225. webscout/search/duckduckgo_main.py +57 -0
  226. webscout/search/engines/__init__.py +127 -0
  227. webscout/search/engines/bing/__init__.py +15 -0
  228. webscout/search/engines/bing/base.py +35 -0
  229. webscout/search/engines/bing/images.py +114 -0
  230. webscout/search/engines/bing/news.py +96 -0
  231. webscout/search/engines/bing/suggestions.py +36 -0
  232. webscout/search/engines/bing/text.py +109 -0
  233. webscout/search/engines/brave/__init__.py +19 -0
  234. webscout/search/engines/brave/base.py +47 -0
  235. webscout/search/engines/brave/images.py +213 -0
  236. webscout/search/engines/brave/news.py +353 -0
  237. webscout/search/engines/brave/suggestions.py +318 -0
  238. webscout/search/engines/brave/text.py +167 -0
  239. webscout/search/engines/brave/videos.py +364 -0
  240. webscout/search/engines/duckduckgo/__init__.py +25 -0
  241. webscout/search/engines/duckduckgo/answers.py +80 -0
  242. webscout/search/engines/duckduckgo/base.py +189 -0
  243. webscout/search/engines/duckduckgo/images.py +100 -0
  244. webscout/search/engines/duckduckgo/maps.py +183 -0
  245. webscout/search/engines/duckduckgo/news.py +70 -0
  246. webscout/search/engines/duckduckgo/suggestions.py +22 -0
  247. webscout/search/engines/duckduckgo/text.py +221 -0
  248. webscout/search/engines/duckduckgo/translate.py +48 -0
  249. webscout/search/engines/duckduckgo/videos.py +80 -0
  250. webscout/search/engines/duckduckgo/weather.py +84 -0
  251. webscout/search/engines/mojeek.py +61 -0
  252. webscout/search/engines/wikipedia.py +77 -0
  253. webscout/search/engines/yahoo/__init__.py +41 -0
  254. webscout/search/engines/yahoo/answers.py +19 -0
  255. webscout/search/engines/yahoo/base.py +34 -0
  256. webscout/search/engines/yahoo/images.py +323 -0
  257. webscout/search/engines/yahoo/maps.py +19 -0
  258. webscout/search/engines/yahoo/news.py +258 -0
  259. webscout/search/engines/yahoo/suggestions.py +140 -0
  260. webscout/search/engines/yahoo/text.py +273 -0
  261. webscout/search/engines/yahoo/translate.py +19 -0
  262. webscout/search/engines/yahoo/videos.py +302 -0
  263. webscout/search/engines/yahoo/weather.py +220 -0
  264. webscout/search/engines/yandex.py +67 -0
  265. webscout/search/engines/yep/__init__.py +13 -0
  266. webscout/search/engines/yep/base.py +34 -0
  267. webscout/search/engines/yep/images.py +101 -0
  268. webscout/search/engines/yep/suggestions.py +38 -0
  269. webscout/search/engines/yep/text.py +99 -0
  270. webscout/search/http_client.py +172 -0
  271. webscout/search/results.py +141 -0
  272. webscout/search/yahoo_main.py +57 -0
  273. webscout/search/yep_main.py +48 -0
  274. webscout/server/__init__.py +48 -0
  275. webscout/server/config.py +78 -0
  276. webscout/server/exceptions.py +69 -0
  277. webscout/server/providers.py +286 -0
  278. webscout/server/request_models.py +131 -0
  279. webscout/server/request_processing.py +404 -0
  280. webscout/server/routes.py +642 -0
  281. webscout/server/server.py +351 -0
  282. webscout/server/ui_templates.py +1171 -0
  283. webscout/swiftcli/__init__.py +79 -95
  284. webscout/swiftcli/core/__init__.py +7 -7
  285. webscout/swiftcli/core/cli.py +574 -297
  286. webscout/swiftcli/core/context.py +98 -104
  287. webscout/swiftcli/core/group.py +268 -241
  288. webscout/swiftcli/decorators/__init__.py +28 -28
  289. webscout/swiftcli/decorators/command.py +243 -221
  290. webscout/swiftcli/decorators/options.py +247 -220
  291. webscout/swiftcli/decorators/output.py +392 -252
  292. webscout/swiftcli/exceptions.py +21 -21
  293. webscout/swiftcli/plugins/__init__.py +9 -9
  294. webscout/swiftcli/plugins/base.py +134 -135
  295. webscout/swiftcli/plugins/manager.py +269 -269
  296. webscout/swiftcli/utils/__init__.py +58 -59
  297. webscout/swiftcli/utils/formatting.py +251 -252
  298. webscout/swiftcli/utils/parsing.py +368 -267
  299. webscout/update_checker.py +280 -136
  300. webscout/utils.py +28 -14
  301. webscout/version.py +2 -1
  302. webscout/version.py.bak +3 -0
  303. webscout/zeroart/__init__.py +218 -135
  304. webscout/zeroart/base.py +70 -66
  305. webscout/zeroart/effects.py +155 -101
  306. webscout/zeroart/fonts.py +1799 -1239
  307. webscout-2026.1.19.dist-info/METADATA +638 -0
  308. webscout-2026.1.19.dist-info/RECORD +312 -0
  309. {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/WHEEL +1 -1
  310. {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/entry_points.txt +1 -1
  311. webscout/DWEBS.py +0 -520
  312. webscout/Extra/Act.md +0 -309
  313. webscout/Extra/GitToolkit/gitapi/README.md +0 -110
  314. webscout/Extra/autocoder/__init__.py +0 -9
  315. webscout/Extra/autocoder/autocoder.py +0 -1105
  316. webscout/Extra/autocoder/autocoder_utiles.py +0 -332
  317. webscout/Extra/gguf.md +0 -430
  318. webscout/Extra/weather.md +0 -281
  319. webscout/Litlogger/README.md +0 -10
  320. webscout/Litlogger/__init__.py +0 -15
  321. webscout/Litlogger/formats.py +0 -4
  322. webscout/Litlogger/handlers.py +0 -103
  323. webscout/Litlogger/levels.py +0 -13
  324. webscout/Litlogger/logger.py +0 -92
  325. webscout/Provider/AI21.py +0 -177
  326. webscout/Provider/AISEARCH/DeepFind.py +0 -254
  327. webscout/Provider/AISEARCH/felo_search.py +0 -202
  328. webscout/Provider/AISEARCH/genspark_search.py +0 -324
  329. webscout/Provider/AISEARCH/hika_search.py +0 -186
  330. webscout/Provider/AISEARCH/scira_search.py +0 -298
  331. webscout/Provider/Aitopia.py +0 -316
  332. webscout/Provider/AllenAI.py +0 -440
  333. webscout/Provider/Blackboxai.py +0 -791
  334. webscout/Provider/ChatGPTClone.py +0 -237
  335. webscout/Provider/ChatGPTGratis.py +0 -194
  336. webscout/Provider/Cloudflare.py +0 -324
  337. webscout/Provider/ExaChat.py +0 -358
  338. webscout/Provider/Flowith.py +0 -217
  339. webscout/Provider/FreeGemini.py +0 -250
  340. webscout/Provider/Glider.py +0 -225
  341. webscout/Provider/HF_space/__init__.py +0 -0
  342. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  343. webscout/Provider/HuggingFaceChat.py +0 -469
  344. webscout/Provider/Hunyuan.py +0 -283
  345. webscout/Provider/LambdaChat.py +0 -411
  346. webscout/Provider/Llama3.py +0 -259
  347. webscout/Provider/Nemotron.py +0 -218
  348. webscout/Provider/OLLAMA.py +0 -396
  349. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -766
  350. webscout/Provider/OPENAI/Cloudflare.py +0 -378
  351. webscout/Provider/OPENAI/FreeGemini.py +0 -283
  352. webscout/Provider/OPENAI/NEMOTRON.py +0 -232
  353. webscout/Provider/OPENAI/Qwen3.py +0 -283
  354. webscout/Provider/OPENAI/api.py +0 -969
  355. webscout/Provider/OPENAI/c4ai.py +0 -373
  356. webscout/Provider/OPENAI/chatgptclone.py +0 -494
  357. webscout/Provider/OPENAI/copilot.py +0 -242
  358. webscout/Provider/OPENAI/flowith.py +0 -162
  359. webscout/Provider/OPENAI/freeaichat.py +0 -359
  360. webscout/Provider/OPENAI/mcpcore.py +0 -389
  361. webscout/Provider/OPENAI/multichat.py +0 -376
  362. webscout/Provider/OPENAI/opkfc.py +0 -496
  363. webscout/Provider/OPENAI/scirachat.py +0 -477
  364. webscout/Provider/OPENAI/standardinput.py +0 -433
  365. webscout/Provider/OPENAI/typegpt.py +0 -364
  366. webscout/Provider/OPENAI/uncovrAI.py +0 -463
  367. webscout/Provider/OPENAI/venice.py +0 -431
  368. webscout/Provider/OPENAI/yep.py +0 -382
  369. webscout/Provider/OpenGPT.py +0 -209
  370. webscout/Provider/Perplexitylabs.py +0 -415
  371. webscout/Provider/Reka.py +0 -214
  372. webscout/Provider/StandardInput.py +0 -290
  373. webscout/Provider/TTI/aiarta.py +0 -365
  374. webscout/Provider/TTI/artbit.py +0 -0
  375. webscout/Provider/TTI/fastflux.py +0 -200
  376. webscout/Provider/TTI/piclumen.py +0 -203
  377. webscout/Provider/TTI/pixelmuse.py +0 -225
  378. webscout/Provider/TTS/gesserit.py +0 -128
  379. webscout/Provider/TTS/sthir.py +0 -94
  380. webscout/Provider/TeachAnything.py +0 -229
  381. webscout/Provider/UNFINISHED/puterjs.py +0 -635
  382. webscout/Provider/UNFINISHED/test_lmarena.py +0 -119
  383. webscout/Provider/Venice.py +0 -258
  384. webscout/Provider/VercelAI.py +0 -253
  385. webscout/Provider/Writecream.py +0 -246
  386. webscout/Provider/WritingMate.py +0 -269
  387. webscout/Provider/asksteve.py +0 -220
  388. webscout/Provider/chatglm.py +0 -215
  389. webscout/Provider/copilot.py +0 -425
  390. webscout/Provider/freeaichat.py +0 -285
  391. webscout/Provider/granite.py +0 -235
  392. webscout/Provider/hermes.py +0 -266
  393. webscout/Provider/koala.py +0 -170
  394. webscout/Provider/lmarena.py +0 -198
  395. webscout/Provider/multichat.py +0 -364
  396. webscout/Provider/scira_chat.py +0 -299
  397. webscout/Provider/scnet.py +0 -243
  398. webscout/Provider/talkai.py +0 -194
  399. webscout/Provider/typegpt.py +0 -289
  400. webscout/Provider/uncovr.py +0 -368
  401. webscout/Provider/yep.py +0 -389
  402. webscout/litagent/Readme.md +0 -276
  403. webscout/litprinter/__init__.py +0 -59
  404. webscout/swiftcli/Readme.md +0 -323
  405. webscout/tempid.py +0 -128
  406. webscout/webscout_search.py +0 -1184
  407. webscout/webscout_search_async.py +0 -654
  408. webscout/yep_search.py +0 -347
  409. webscout/zeroart/README.md +0 -89
  410. webscout-8.2.9.dist-info/METADATA +0 -1033
  411. webscout-8.2.9.dist-info/RECORD +0 -289
  412. {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/licenses/LICENSE.md +0 -0
  413. {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/top_level.txt +0 -0
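
Note the version-scheme change: 8.2.9 is semantic versioning, while 2026.1.19 appears to be date-based (CalVer). A minimal sketch of why installers still treat this as an ordinary upgrade, using the `packaging` library (the PEP 440 implementation pip itself relies on); the two version strings are the ones shown in this diff:

```python
# Sketch, assuming only the two version strings shown in this diff.
# PEP 440 compares release segments numerically, so 2026.1.19 sorts
# after 8.2.9 (2026 > 8 on the first segment) and
# `pip install --upgrade webscout` will move to the new release.
from packaging.version import Version

old = Version("8.2.9")
new = Version("2026.1.19")
assert new > old
print(f"{old} -> {new} is an upgrade: {new > old}")
```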
@@ -1,952 +1,890 @@
- <div align="center">
- <a href="https://github.com/OEvortex/Webscout">
- <img src="https://img.shields.io/badge/WebScout-OpenAI%20Compatible%20Providers-4285F4?style=for-the-badge&logo=openai&logoColor=white" alt="WebScout OpenAI Compatible Providers">
- </a>
- <br/>
- <h1>WebScout OpenAI-Compatible Providers</h1>
- <p><strong>Seamlessly integrate with various AI providers using OpenAI-compatible interfaces</strong></p>
-
- <p>
- <img src="https://img.shields.io/badge/Python-3.7+-3776AB?style=flat-square&logo=python&logoColor=white" alt="Python 3.7+">
- <img src="https://img.shields.io/badge/License-MIT-green?style=flat-square" alt="License: MIT">
- <img src="https://img.shields.io/badge/PRs-Welcome-brightgreen?style=flat-square" alt="PRs Welcome">
- </p>
-
- <p>
- Access multiple AI providers through a standardized OpenAI-compatible interface, making it easy to switch between providers without changing your code.
- </p>
- </div>
-
- ## 🚀 Overview
-
- The WebScout OpenAI-Compatible Providers module offers a standardized way to interact with various AI providers using the familiar OpenAI API structure. This makes it easy to:
-
- * Use the same code structure across different AI providers
- * Switch between providers without major code changes
- * Leverage the OpenAI ecosystem of tools and libraries with alternative AI providers
-
- ## ⚙️ Available Providers
-
- Currently, the following providers are implemented with OpenAI-compatible interfaces:
-
- - DeepInfra
- - Glider
- - ChatGPTClone
- - X0GPT
- - WiseCat
- - Venice
- - ExaAI
- - TypeGPT
- - SciraChat
- - LLMChatCo
- - FreeAIChat
- - YEPCHAT
- - HeckAI
- - SonusAI
- - ExaChat
- - Netwrck
- - StandardInput
- - Writecream
- - toolbaz
- - UncovrAI
- - OPKFC
- - TextPollinations
- - E2B
- - MultiChatAI
- - AI4Chat
- - MCPCore
- - TypefullyAI
- - Flowith
- - ChatSandbox
- - Cloudflare
- - NEMOTRON
- - BLACKBOXAI
- - Copilot
- - TwoAI
- - oivscode
- - Qwen3
- ---
-
-
- ## 💻 Usage Examples
-
- Here are examples of how to use the OpenAI-compatible providers in your code.
-
- ### Basic Usage with DeepInfra
-
- ```python
- from webscout.client import DeepInfra
-
- # Initialize the client
- client = DeepInfra()
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
- model="meta-llama/Meta-Llama-3.1-8B-Instruct",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Tell me about Python programming."}
- ],
- temperature=0.7,
- max_tokens=500
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Basic Usage with Glider
-
- ```python
- from webscout.client import Glider
-
- # Initialize the client
- client = Glider()
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
- model="chat-llama-3-1-70b",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Tell me about Python programming."}
- ],
- max_tokens=500
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Streaming Responses (Example with DeepInfra)
-
- ```python
- from webscout.client import DeepInfra
-
- # Initialize the client
- client = DeepInfra()
-
- # Create a streaming completion
- stream = client.chat.completions.create(
- model="meta-llama/Meta-Llama-3.1-8B-Instruct",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Write a short poem about programming."}
- ],
- stream=True,
- temperature=0.7
- )
-
- # Process the streaming response
- for chunk in stream:
- if chunk.choices[0].delta.content:
- print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Streaming with Glider
-
- ```python
- from webscout.client import Glider
-
- # Initialize the client
- client = Glider()
-
- # Create a streaming completion
- stream = client.chat.completions.create(
- model="chat-llama-3-1-70b",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Write a short poem about programming."}
- ],
- stream=True
- )
-
- # Process the streaming response
- for chunk in stream:
- if chunk.choices[0].delta.content:
- print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Basic Usage with ChatGPTClone
-
- ```python
- from webscout.client import ChatGPTClone
-
- # Initialize the client
- client = ChatGPTClone()
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
- model="gpt-4",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Tell me about Python programming."}
- ],
- temperature=0.7
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Streaming with ChatGPTClone
-
- ```python
- from webscout.client import ChatGPTClone
-
- # Initialize the client
- client = ChatGPTClone()
-
- # Create a streaming completion
- stream = client.chat.completions.create(
- model="gpt-4",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Write a short poem about programming."}
- ],
- stream=True
- )
-
- # Process the streaming response
- for chunk in stream:
- if chunk.choices[0].delta.content:
- print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Basic Usage with X0GPT
-
- ```python
- from webscout.client import X0GPT
-
- # Initialize the client
- client = X0GPT()
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
- model="gpt-4", # Model name doesn't matter for X0GPT
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Tell me about Python programming."}
- ]
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Streaming with X0GPT
-
- ```python
- from webscout.client import X0GPT
-
- # Initialize the client
- client = X0GPT()
-
- # Create a streaming completion
- stream = client.chat.completions.create(
- model="gpt-4", # Model name doesn't matter for X0GPT
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Write a short poem about programming."}
- ],
- stream=True
- )
-
- # Process the streaming response
- for chunk in stream:
- if chunk.choices[0].delta.content:
- print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Basic Usage with WiseCat
-
- ```python
- from webscout.client import WiseCat
-
- # Initialize the client
- client = WiseCat()
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
- model="chat-model-small",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Tell me about Python programming."}
- ]
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Streaming with WiseCat
-
- ```python
- from webscout.client import WiseCat
-
- # Initialize the client
- client = WiseCat()
-
- # Create a streaming completion
- stream = client.chat.completions.create(
- model="chat-model-small",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Write a short poem about programming."}
- ],
- stream=True
- )
-
- # Process the streaming response
- for chunk in stream:
- if chunk.choices[0].delta.content:
- print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Basic Usage with Venice
-
- ```python
- from webscout.client import Venice
-
- # Initialize the client
- client = Venice(temperature=0.7, top_p=0.9)
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
- model="mistral-31-24b",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Tell me about Python programming."}
- ]
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Streaming with Venice
-
- ```python
- from webscout.client import Venice
-
- # Initialize the client
- client = Venice()
-
- # Create a streaming completion
- stream = client.chat.completions.create(
- model="mistral-31-24b",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Write a short poem about programming."}
- ],
- stream=True
- )
-
- # Process the streaming response
- for chunk in stream:
- if chunk.choices[0].delta.content:
- print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Basic Usage with ExaAI
-
- ```python
- from webscout.client import ExaAI
-
- # Initialize the client
- client = ExaAI()
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
- model="O3-Mini",
- messages=[
- # Note: ExaAI does not support system messages (they will be removed)
- {"role": "user", "content": "Hello!"},
- {"role": "assistant", "content": "Hi there! How can I help you today?"},
- {"role": "user", "content": "Tell me about Python programming."}
- ]
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Basic Usage with HeckAI
-
- ```python
- from webscout.client import HeckAI
-
- # Initialize the client
- client = HeckAI(language="English")
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
- model="google/gemini-2.0-flash-001",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Tell me about Python programming."}
- ]
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Streaming with HeckAI
-
- ```python
- from webscout.client import HeckAI
-
- # Initialize the client
- client = HeckAI()
-
- # Create a streaming completion
- stream = client.chat.completions.create(
- model="google/gemini-2.0-flash-001",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Write a short poem about programming."}
- ],
- stream=True
- )
-
- # Process the streaming response
- for chunk in stream:
- if chunk.choices[0].delta.content:
- print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Streaming with ExaAI
-
- ```python
- from webscout.client import ExaAI
-
- # Initialize the client
- client = ExaAI()
-
- # Create a streaming completion
- stream = client.chat.completions.create(
- model="O3-Mini",
- messages=[
- # Note: ExaAI does not support system messages (they will be removed)
- {"role": "user", "content": "Hello!"},
- {"role": "assistant", "content": "Hi there! How can I help you today?"},
- {"role": "user", "content": "Write a short poem about programming."}
- ],
- stream=True
- )
-
- # Process the streaming response
- for chunk in stream:
- if chunk.choices[0].delta.content:
- print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Basic Usage with TypeGPT
-
- ```python
- from webscout.client import TypeGPT
-
- # Initialize the client
- client = TypeGPT()
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
- model="chatgpt-4o-latest",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Write a short poem about programming."}
- ]
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Streaming with TypeGPT
-
- ```python
- from webscout.client import TypeGPT
-
- # Initialize the client
- client = TypeGPT()
-
- # Create a streaming completion
- stream = client.chat.completions.create(
- model="chatgpt-4o-latest",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Write a short poem about programming."}
- ],
- stream=True
- )
-
- # Process the streaming response
- for chunk in stream:
- if chunk.choices[0].delta.content:
- print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Basic Usage with SciraChat
-
- ```python
- from webscout.client import SciraChat
-
- # Initialize the client
- client = SciraChat()
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
- model="scira-default",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Tell me about Python programming."}
- ]
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Streaming with SciraChat
-
- ```python
- from webscout.client import SciraChat
-
- # Initialize the client
- client = SciraChat()
-
- # Create a streaming completion
- stream = client.chat.completions.create(
- model="scira-default",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Write a short poem about programming."}
- ],
- stream=True
- )
-
- # Process the streaming response
- for chunk in stream:
- if chunk.choices[0].delta.content:
- print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Basic Usage with FreeAIChat
-
- ```python
- from webscout.client import FreeAIChat
-
- # Initialize the client
- client = FreeAIChat()
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
- model="GPT 4o",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Tell me about Python programming."}
- ]
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Streaming with FreeAIChat
-
- ```python
- from webscout.client import FreeAIChat
-
- # Initialize the client
- client = FreeAIChat()
-
- # Create a streaming completion
- stream = client.chat.completions.create(
- model="GPT 4o",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Write a short poem about programming."}
- ],
- stream=True
- )
-
- # Process the streaming response
- for chunk in stream:
- if chunk.choices[0].delta.content:
- print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Basic Usage with LLMChatCo
-
- ```python
- from webscout.client import LLMChatCo
-
- # Initialize the client
- client = LLMChatCo()
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
- model="gemini-flash-2.0", # Default model
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Tell me about Python programming."}
- ],
- temperature=0.7
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Streaming with LLMChatCo
-
- ```python
- from webscout.client import LLMChatCo
-
- # Initialize the client
- client = LLMChatCo()
-
- # Create a streaming completion
- stream = client.chat.completions.create(
- model="gemini-flash-2.0",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Write a short poem about programming."}
- ],
- stream=True
- )
-
- # Process the streaming response
- for chunk in stream:
- if chunk.choices[0].delta.content:
- print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Basic Usage with YEPCHAT
-
- ```python
- from webscout.client import YEPCHAT
-
- # Initialize the client
- client = YEPCHAT()
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
- model="DeepSeek-R1-Distill-Qwen-32B",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Tell me about Python programming."}
- ],
- temperature=0.7
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Basic Usage with SonusAI
-
- ```python
- from webscout.client import SonusAI
-
- # Initialize the client
- client = SonusAI()
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
- model="pro", # Choose from 'pro', 'air', or 'mini'
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Tell me about Python programming."}
- ],
- reasoning=True # Optional: Enable reasoning mode
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Streaming with YEPCHAT
-
- ```python
- from webscout.client import YEPCHAT
-
- # Initialize the client
- client = YEPCHAT()
-
- # Create a streaming completion
- stream = client.chat.completions.create(
- model="Mixtral-8x7B-Instruct-v0.1",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Write a short poem about programming."}
- ],
- stream=True
- )
-
- # Process the streaming response
- for chunk in stream:
- if chunk.choices[0].delta.content:
- print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Streaming with SonusAI
-
- ```python
- from webscout.client import SonusAI
-
- # Initialize the client
- client = SonusAI(timeout=60)
-
- # Create a streaming completion
- stream = client.chat.completions.create(
- model="air",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Write a short poem about programming."}
- ],
- stream=True
- )
-
- # Process the streaming response
- for chunk in stream:
- if chunk.choices[0].delta.content:
- print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Basic Usage with ExaChat
-
- ```python
- from webscout.client import ExaChat
-
- # Initialize the client
- client = ExaChat()
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
- model="exaanswer", # Choose from many available models
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Tell me about Python programming."}
- ]
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Using Different ExaChat Providers
-
- ```python
- from webscout.client import ExaChat
-
- # Initialize the client
- client = ExaChat(timeout=60)
-
- # Use a Gemini model
- gemini_response = client.chat.completions.create(
- model="gemini-2.0-flash",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Explain quantum computing in simple terms."}
- ]
- )
-
- # Use a Groq model
- groq_response = client.chat.completions.create(
- model="llama-3.1-8b-instant",
- messages=[
- {"role": "user", "content": "Tell me about Python programming."}
- ]
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Streaming with Netwrck
-
- ```python
- from webscout.client import Netwrck
-
- # Initialize the client
- client = Netwrck(timeout=60)
-
- # Create a streaming completion
- stream = client.chat.completions.create(
- model="openai/gpt-4o-mini",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Write a short poem about programming."}
- ],
- stream=True
- )
-
- # Process the streaming response
- for chunk in stream:
- if chunk.choices[0].delta.content:
- print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Basic Usage with StandardInput
-
- ```python
- from webscout.client import StandardInput
-
- # Initialize the client
- client = StandardInput()
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
- model="standard-quick",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Tell me about Python programming."}
- ]
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Streaming with StandardInput
-
- ```python
- from webscout.client import StandardInput
-
- # Initialize the client
- client = StandardInput()
-
- # Create a streaming completion
- stream = client.chat.completions.create(
- model="standard-reasoning",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Count from 1 to 5."}
- ],
- stream=True,
- enable_reasoning=True # Enable reasoning capabilities
- )
-
- # Process the streaming response
- for chunk in stream:
- if chunk.choices[0].delta.content:
- print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ## 🔄 Response Format
-
- All providers return responses that mimic the OpenAI API structure, ensuring compatibility with tools built for OpenAI.
-
- ### 📝 Non-streaming Response
-
- ```json
- {
- "id": "chatcmpl-123abc",
- "object": "chat.completion",
- "created": 1677858242,
- "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
- "usage": {
- "prompt_tokens": 13,
- "completion_tokens": 7,
- "total_tokens": 20
- },
- "choices": [
- {
- "message": {
- "role": "assistant",
- "content": "This is a response from the model."
- },
- "finish_reason": "stop",
- "index": 0
- }
- ]
- }
- ```
-
- ### 📱 Streaming Response Chunks
-
- ```json
- {
- "id": "chatcmpl-123abc",
- "object": "chat.completion.chunk",
- "created": 1677858242,
- "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
- "choices": [
- {
- "delta": {
- "content": "This "
- },
- "finish_reason": null,
- "index": 0
- }
- ]
- }
- ```
-
- ## 🧩 Architecture
-
- The OpenAI-compatible providers are built on a modular architecture:
-
- * `base.py`: Contains abstract base classes that define the OpenAI-compatible interface
- * `utils.py`: Provides data structures that mimic OpenAI's response format
- * Provider-specific implementations (e.g., `deepinfra.py`): Implement the abstract interfaces for specific providers
-
- This architecture makes it easy to add new providers while maintaining a consistent interface.
-
- ## 📝 Notes
-
- * Some providers may require API keys for full functionality
- * Not all OpenAI features are supported by all providers
- * Response formats are standardized to match OpenAI's format, but the underlying content depends on the specific provider and model
-
- ## 🤝 Contributing
-
- Want to add a new OpenAI-compatible provider? Follow these steps:
-
- 1. Create a new file in the `webscout/Provider/OPENAI` directory
- 2. Implement the `OpenAICompatibleProvider` interface
- 3. Add appropriate tests
- 4. Update this README with information about the new provider
-
- ## 📚 Related Documentation
-
- * [OpenAI API Reference](https://platform.openai.com/docs/api-reference)
- * [DeepInfra Documentation](https://deepinfra.com/docs)
- * [Glider.so Website](https://glider.so/)
- * [ChatGPT Clone Website](https://chatgpt-clone-ten-nu.vercel.app/)
- * [X0GPT Website](https://x0-gpt.devwtf.in/)
- * [WiseCat Website](https://wise-cat-groq.vercel.app/)
- * [Venice AI Website](https://venice.ai/)
- * [ExaAI Website](https://o3minichat.exa.ai/)
- * [TypeGPT Website](https://chat.typegpt.net/)
- * [SciraChat Website](https://scira.ai/)
- * [FreeAIChat Website](https://freeaichatplayground.com/)
- * [LLMChatCo Website](https://llmchat.co/)
- * [Yep.com Website](https://yep.com/)
- * [HeckAI Website](https://heck.ai/)
- * [SonusAI Website](https://chat.sonus.ai/)
- * [ExaChat Website](https://exa-chat.vercel.app/)
- * [Netwrck Website](https://netwrck.com/)
- * [StandardInput Website](https://chat.standard-input.com/)
-
- <div align="center">
- <a href="https://t.me/PyscoutAI"><img alt="Telegram Group" src="https://img.shields.io/badge/Telegram%20Group-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white"></a>
- <a href="https://buymeacoffee.com/oevortex"><img alt="Buy Me A Coffee" src="https://img.shields.io/badge/Buy%20Me%20A%20Coffee-FFDD00?style=for-the-badge&logo=buymeacoffee&logoColor=black"></a>
- </div>
1
+ <div align="center">
2
+ <a href="https://github.com/pyscout/Webscout">
3
+ <img src="https://img.shields.io/badge/WebScout-OpenAI%20Compatible%20Providers-4285F4?style=for-the-badge&logo=openai&logoColor=white" alt="WebScout OpenAI Compatible Providers">
4
+ </a>
5
+ <br/>
6
+ <h1>WebScout OpenAI-Compatible Providers</h1>
7
+ <p><strong>Seamlessly integrate with various AI providers using OpenAI-compatible interfaces</strong></p>
8
+
9
+ <p>
10
+ <img src="https://img.shields.io/badge/Python-3.7+-3776AB?style=flat-square&logo=python&logoColor=white" alt="Python 3.7+">
11
+ <img src="https://img.shields.io/badge/License-MIT-green?style=flat-square" alt="License: MIT">
12
+ <img src="https://img.shields.io/badge/PRs-Welcome-brightgreen?style=flat-square" alt="PRs Welcome">
13
+ </p>
14
+
15
+ <p>
16
+ Access multiple AI providers through a standardized OpenAI-compatible interface, making it easy to switch between providers without changing your code.
17
+ </p>
18
+ </div>
19
+
20
+ ## 🚀 Overview
21
+
22
+ The WebScout OpenAI-Compatible Providers module offers a standardized way to interact with various AI providers using the familiar OpenAI API structure. This makes it easy to:
23
+
24
+ - Use the same code structure across different AI providers
25
+ - Switch between providers without major code changes
26
+ - Leverage the OpenAI ecosystem of tools and libraries with alternative AI providers
27
+
28
+ ## ⚙️ Available Providers
29
+
30
+ Currently, the following providers are implemented with OpenAI-compatible interfaces:
31
+
32
+ - DeepInfra
33
+ - Glider
34
+ - X0GPT
35
+ - WiseCat
36
+ - ExaAI
37
+ - TypeGPT
38
+ - LLMChatCo
39
+ - YEPCHAT
40
+ - HeckAI
41
+ - IBM
42
+ - SonusAI
43
+ - Ayle
44
+ - Netwrck
45
+ - StandardInput
46
+ - Writecream
47
+ - toolbaz
48
+ - UncovrAI
49
+ - OPKFC
50
+ - TextPollinations
51
+ - E2B
52
+ - MultiChatAI
53
+ - AI4Chat
54
+ - MCPCore
55
+ - TypefullyAI
56
+ - Flowith
57
+ - ChatSandbox
58
+ - Cloudflare
59
+ - NEMOTRON
60
+ - BLACKBOXAI
61
+ - Copilot
62
+ - TwoAI
63
+ - oivscode
64
+ - Qwen3
65
+ - TogetherAI
66
+ - PiAI
67
+ - FalconH1
68
+ - XenAI
69
+ - MonoChat
70
+ - Friendli
71
+ - MiniMax
72
+ - QodoAI
73
+ - Kimi
74
+ - GptOss
75
+
76
+ ## 💻 Usage Examples
77
+
78
+ Here are examples of how to use the OpenAI-compatible providers in your code.
79
+
80
+ ### Basic Usage with DeepInfra
81
+
82
+ ```python
83
+ from webscout.client import DeepInfra
84
+
85
+ # Initialize the client
86
+ client = DeepInfra()
87
+
88
+ # Create a completion (non-streaming)
89
+ response = client.chat.completions.create(
90
+ model="meta-llama/Meta-Llama-3.1-8B-Instruct",
91
+ messages=[
92
+ {"role": "system", "content": "You are a helpful assistant."},
93
+ {"role": "user", "content": "Tell me about Python programming."}
94
+ ],
95
+ temperature=0.7,
96
+ max_tokens=500
97
+ )
98
+
99
+ # Print the response
100
+ print(response.choices[0].message.content)
101
+ ```
102
+
103
+ ### Basic Usage with Glider
104
+
105
+ ```python
106
+ from webscout.client import Glider
107
+
108
+ # Initialize the client
109
+ client = Glider()
110
+
111
+ # Create a completion (non-streaming)
112
+ response = client.chat.completions.create(
113
+ model="chat-llama-3-1-70b",
114
+ messages=[
115
+ {"role": "system", "content": "You are a helpful assistant."},
116
+ {"role": "user", "content": "Tell me about Python programming."}
117
+ ],
118
+ max_tokens=500
119
+ )
120
+
121
+ # Print the response
122
+ print(response.choices[0].message.content)
123
+ ```
124
+
125
+ ### Streaming Responses (Example with DeepInfra)
126
+
127
+ ```python
128
+ from webscout.client import DeepInfra
129
+
130
+ # Initialize the client
131
+ client = DeepInfra()
132
+
133
+ # Create a streaming completion
134
+ stream = client.chat.completions.create(
135
+ model="meta-llama/Meta-Llama-3.1-8B-Instruct",
136
+ messages=[
137
+ {"role": "system", "content": "You are a helpful assistant."},
138
+ {"role": "user", "content": "Write a short poem about programming."}
139
+ ],
140
+ stream=True,
141
+ temperature=0.7
142
+ )
143
+
144
+ # Process the streaming response
145
+ for chunk in stream:
146
+ if chunk.choices[0].delta.content:
147
+ print(chunk.choices[0].delta.content, end="", flush=True)
148
+ print() # Add a newline at the end
149
+ ```
150
+
151
+ ### Streaming with Glider
152
+
153
+ ```python
154
+ from webscout.client import Glider
155
+
156
+ # Initialize the client
157
+ client = Glider()
158
+
159
+ # Create a streaming completion
160
+ stream = client.chat.completions.create(
161
+ model="chat-llama-3-1-70b",
162
+ messages=[
163
+ {"role": "system", "content": "You are a helpful assistant."},
164
+ {"role": "user", "content": "Write a short poem about programming."}
165
+ ],
166
+ stream=True
167
+ )
168
+
169
+ # Process the streaming response
170
+ for chunk in stream:
171
+ if chunk.choices[0].delta.content:
172
+ print(chunk.choices[0].delta.content, end="", flush=True)
173
+ print() # Add a newline at the end
174
+ ```
175
+
176
+
177
+ ### Basic Usage with X0GPT
178
+
179
+ ```python
180
+ from webscout.client import X0GPT
181
+
182
+ # Initialize the client
183
+ client = X0GPT()
184
+
185
+ # Create a completion (non-streaming)
186
+ response = client.chat.completions.create(
187
+ model="gpt-4", # Model name doesn't matter for X0GPT
188
+ messages=[
189
+ {"role": "system", "content": "You are a helpful assistant."},
190
+ {"role": "user", "content": "Tell me about Python programming."}
191
+ ]
192
+ )
193
+
194
+ # Print the response
195
+ print(response.choices[0].message.content)
196
+ ```
197
+
198
+ ### Streaming with X0GPT
199
+
200
+ ```python
201
+ from webscout.client import X0GPT
202
+
203
+ # Initialize the client
204
+ client = X0GPT()
205
+
206
+ # Create a streaming completion
207
+ stream = client.chat.completions.create(
208
+ model="gpt-4", # Model name doesn't matter for X0GPT
209
+ messages=[
210
+ {"role": "system", "content": "You are a helpful assistant."},
211
+ {"role": "user", "content": "Write a short poem about programming."}
212
+ ],
213
+ stream=True
214
+ )
215
+
216
+ # Process the streaming response
217
+ for chunk in stream:
218
+ if chunk.choices[0].delta.content:
219
+ print(chunk.choices[0].delta.content, end="", flush=True)
220
+ print() # Add a newline at the end
221
+ ```
222
+
223
+ ### Basic Usage with WiseCat
224
+
225
+ ```python
226
+ from webscout.client import WiseCat
227
+
228
+ # Initialize the client
229
+ client = WiseCat()
230
+
231
+ # Create a completion (non-streaming)
232
+ response = client.chat.completions.create(
233
+ model="chat-model-small",
234
+ messages=[
235
+ {"role": "system", "content": "You are a helpful assistant."},
236
+ {"role": "user", "content": "Tell me about Python programming."}
237
+ ]
238
+ )
239
+
240
+ # Print the response
241
+ print(response.choices[0].message.content)
242
+ ```
243
+
244
+ ### Streaming with WiseCat
245
+
246
+ ```python
247
+ from webscout.client import WiseCat
248
+
249
+ # Initialize the client
250
+ client = WiseCat()
251
+
252
+ # Create a streaming completion
253
+ stream = client.chat.completions.create(
254
+ model="chat-model-small",
255
+ messages=[
256
+ {"role": "system", "content": "You are a helpful assistant."},
257
+ {"role": "user", "content": "Write a short poem about programming."}
258
+ ],
259
+ stream=True
260
+ )
261
+
262
+ # Process the streaming response
263
+ for chunk in stream:
264
+ if chunk.choices[0].delta.content:
265
+ print(chunk.choices[0].delta.content, end="", flush=True)
266
+ print() # Add a newline at the end
267
+ ```
268
+
269
+ ### Basic Usage with ExaAI
+
+ ```python
+ from webscout.client import ExaAI
+
+ # Initialize the client
+ client = ExaAI()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="O3-Mini",
+     messages=[
+         # Note: ExaAI does not support system messages (they will be removed)
+         {"role": "user", "content": "Hello!"},
+         {"role": "assistant", "content": "Hi there! How can I help you today?"},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ]
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Basic Usage with HeckAI
+
+ ```python
+ from webscout.client import HeckAI
+
+ # Initialize the client
+ client = HeckAI(language="English")
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="google/gemini-2.0-flash-001",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ]
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with HeckAI
+
+ ```python
+ from webscout.client import HeckAI
+
+ # Initialize the client
+ client = HeckAI()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="google/gemini-2.0-flash-001",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print()  # Add a newline at the end
+ ```
+
+ ### Basic Usage with IBM
+
+ ```python
+ from webscout.Provider.OPENAI.ibm import IBM
+
+ # Initialize the client
+ client = IBM(timeout=30)
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="granite-chat",
+     messages=[
+         {"role": "user", "content": "Tell me about Python programming."}
+     ]
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with IBM
+
+ ```python
+ from webscout.Provider.OPENAI.ibm import IBM
+
+ # Initialize the client
+ client = IBM()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="granite-chat",
+     messages=[
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print()  # Add a newline at the end
+ ```
+
+ ### Using Different IBM Granite Models
+
+ ```python
+ from webscout.Provider.OPENAI.ibm import IBM
+
+ # Initialize the client
+ client = IBM()
+
+ # Get available models
+ print("Available models:", client.models.list())
+
+ # Use the granite-chat model
+ response = client.chat.completions.create(
+     model="granite-chat",
+     messages=[
+         {"role": "user", "content": "Explain quantum computing in simple terms."}
+     ]
+ )
+ print("Granite Chat:", response.choices[0].message.content)
+
+ # Use the granite-search model (optimized for search tasks)
+ search_response = client.chat.completions.create(
+     model="granite-search",
+     messages=[
+         {"role": "user", "content": "What are the best practices for Python development?"}
+     ]
+ )
+ print("Granite Search:", search_response.choices[0].message.content)
+ ```
+
+ ### Streaming with ExaAI
+
+ ```python
+ from webscout.client import ExaAI
+
+ # Initialize the client
+ client = ExaAI()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="O3-Mini",
+     messages=[
+         # Note: ExaAI does not support system messages (they will be removed)
+         {"role": "user", "content": "Hello!"},
+         {"role": "assistant", "content": "Hi there! How can I help you today?"},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print()  # Add a newline at the end
+ ```
+
+ ### Basic Usage with TypeGPT
+
+ ```python
+ from webscout.client import TypeGPT
+
+ # Initialize the client
+ client = TypeGPT()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="chatgpt-4o-latest",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ]
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with TypeGPT
+
+ ```python
+ from webscout.client import TypeGPT
+
+ # Initialize the client
+ client = TypeGPT()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="chatgpt-4o-latest",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print()  # Add a newline at the end
+ ```
+
+ ### Basic Usage with FreeAIChat
+
+ ```python
+ from webscout.client import FreeAIChat
+
+ # Initialize the client
+ client = FreeAIChat()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="GPT 4o",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ]
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with FreeAIChat
+
+ ```python
+ from webscout.client import FreeAIChat
+
+ # Initialize the client
+ client = FreeAIChat()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="GPT 4o",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print()  # Add a newline at the end
+ ```
+
+ ### Basic Usage with LLMChatCo
+
+ ```python
+ from webscout.client import LLMChatCo
+
+ # Initialize the client
+ client = LLMChatCo()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="gemini-flash-2.0",  # Default model
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ],
+     temperature=0.7
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with LLMChatCo
+
+ ```python
+ from webscout.client import LLMChatCo
+
+ # Initialize the client
+ client = LLMChatCo()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="gemini-flash-2.0",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print()  # Add a newline at the end
+ ```
+
+ ### Basic Usage with YEPCHAT
+
+ ```python
+ from webscout.client import YEPCHAT
+
+ # Initialize the client
+ client = YEPCHAT()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="DeepSeek-R1-Distill-Qwen-32B",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ],
+     temperature=0.7
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Basic Usage with SonusAI
+
+ ```python
+ from webscout.client import SonusAI
+
+ # Initialize the client
+ client = SonusAI()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="pro",  # Choose from 'pro', 'air', or 'mini'
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ],
+     reasoning=True  # Optional: enable reasoning mode
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with YEPCHAT
+
+ ```python
+ from webscout.client import YEPCHAT
+
+ # Initialize the client
+ client = YEPCHAT()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="Mixtral-8x7B-Instruct-v0.1",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print()  # Add a newline at the end
+ ```
+
+ ### Streaming with SonusAI
+
+ ```python
+ from webscout.client import SonusAI
+
+ # Initialize the client
+ client = SonusAI(timeout=60)
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="air",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print()  # Add a newline at the end
+ ```
+
+ ### Basic Usage with Ayle
+
+ ```python
+ from webscout.client import Ayle
+
+ # Initialize the client
+ client = Ayle()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="gemini-2.5-flash",  # Choose from many available models
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ]
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Using Different Ayle Models
+
+ ```python
+ from webscout.client import Ayle
+
+ # Initialize the client
+ client = Ayle(timeout=60)
+
+ # Use a Gemini model
+ gemini_response = client.chat.completions.create(
+     model="gemini-2.0-flash",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Explain quantum computing in simple terms."}
+     ]
+ )
+
+ # Use a Groq model
+ groq_response = client.chat.completions.create(
+     model="llama-3.3-70b-versatile",
+     messages=[
+         {"role": "user", "content": "Tell me about Python programming."}
+     ]
+ )
+
+ # Print both responses
+ print("Gemini:", gemini_response.choices[0].message.content)
+ print("Groq:", groq_response.choices[0].message.content)
+ ```
+
+ ### Streaming with Netwrck
+
+ ```python
+ from webscout.client import Netwrck
+
+ # Initialize the client
+ client = Netwrck(timeout=60)
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="openai/gpt-4o-mini",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print()  # Add a newline at the end
+ ```
+
+ ### Basic Usage with StandardInput
+
+ ```python
+ from webscout.client import StandardInput
+
+ # Initialize the client
+ client = StandardInput()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="standard-quick",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ]
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with StandardInput
+
+ ```python
+ from webscout.client import StandardInput
+
+ # Initialize the client
+ client = StandardInput()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="standard-reasoning",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Count from 1 to 5."}
+     ],
+     stream=True,
+     enable_reasoning=True  # Enable reasoning capabilities
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print()  # Add a newline at the end
+ ```
+
+ ## 🔄 Response Format
+
+ All providers return responses that mimic the OpenAI API structure, ensuring compatibility with tools built for OpenAI.
+
+ ### 📝 Non-streaming Response
+
+ ```json
+ {
+   "id": "chatcmpl-123abc",
+   "object": "chat.completion",
+   "created": 1677858242,
+   "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
+   "usage": {
+     "prompt_tokens": 13,
+     "completion_tokens": 7,
+     "total_tokens": 20
+   },
+   "choices": [
+     {
+       "message": {
+         "role": "assistant",
+         "content": "This is a response from the model."
+       },
+       "finish_reason": "stop",
+       "index": 0
+     }
+   ]
+ }
+ ```
+
+ ### 📱 Streaming Response Chunks
+
+ ```json
+ {
+   "id": "chatcmpl-123abc",
+   "object": "chat.completion.chunk",
+   "created": 1677858242,
+   "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
+   "choices": [
+     {
+       "delta": {
+         "content": "This "
+       },
+       "finish_reason": null,
+       "index": 0
+     }
+   ]
+ }
+ ```
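+
+ Because every provider returns these same shapes, code written against one provider runs unchanged against another. The helper below is a minimal sketch of that idea: it relies only on the `chat.completions.create` call and the response fields shown above, and the `respond` function itself (its name and parameters) is illustrative, not part of the library.
+
+ ```python
+ from webscout.client import Glider, X0GPT
+
+ def respond(client, model, messages, stream=False):
+     """Run one chat completion against any OpenAI-compatible provider."""
+     if stream:
+         parts = []
+         for chunk in client.chat.completions.create(model=model, messages=messages, stream=True):
+             delta = chunk.choices[0].delta.content
+             if delta:
+                 print(delta, end="", flush=True)
+                 parts.append(delta)
+         print()  # Newline after the stream finishes
+         return "".join(parts)
+     response = client.chat.completions.create(model=model, messages=messages)
+     return response.choices[0].message.content
+
+ messages = [{"role": "user", "content": "Tell me about Python programming."}]
+
+ # The same helper works across providers; only the client (and model name) changes.
+ print(respond(Glider(), "chat-llama-3-1-70b", messages))
+ respond(X0GPT(), "gpt-4", messages, stream=True)
+ ```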
+
+ ## 🧩 Architecture
+
+ The OpenAI-compatible providers are built on a modular architecture:
+
+ - `base.py`: Contains the abstract base classes that define the OpenAI-compatible interface
+ - `utils.py`: Provides data structures that mimic OpenAI's response format
+ - Provider-specific implementations (e.g., `deepinfra.py`): Implement the abstract interfaces for a specific provider
+
+ This architecture makes it easy to add new providers while maintaining a consistent interface.
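+
+ As an illustration of the kind of structures `utils.py` provides, the sketch below models the two response shapes from the previous section as plain dataclasses. The field names come straight from the JSON examples above; the class names and the use of `dataclasses` are assumptions for illustration, not the library's actual definitions.
+
+ ```python
+ from dataclasses import dataclass, field
+ from typing import List, Optional
+
+ @dataclass
+ class Message:
+     role: str
+     content: str
+
+ @dataclass
+ class Delta:
+     content: Optional[str] = None  # may be None on some chunks
+
+ @dataclass
+ class Choice:
+     index: int
+     message: Optional[Message] = None   # populated on full completions
+     delta: Optional[Delta] = None       # populated on streaming chunks
+     finish_reason: Optional[str] = None
+
+ @dataclass
+ class ChatCompletion:
+     id: str
+     object: str    # "chat.completion" or "chat.completion.chunk"
+     created: int   # Unix timestamp
+     model: str
+     choices: List[Choice] = field(default_factory=list)
+ ```
+
+ Either shape can then be consumed exactly as in the provider examples earlier in this README.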
+
+ ## 📝 Notes
+
+ - Some providers may require API keys for full functionality
+ - Not all OpenAI features are supported by every provider
+ - Response formats are standardized to match OpenAI's, but the underlying content depends on the specific provider and model
+
+ ## 🤝 Contributing
+
+ Want to add a new OpenAI-compatible provider? Follow these steps:
+
+ 1. Create a new file in the `webscout/Provider/OPENAI` directory
+ 2. Implement the `OpenAICompatibleProvider` interface (a rough skeleton is sketched below)
+ 3. Add appropriate tests
+ 4. Update this README with information about the new provider
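+
+ The skeleton below sketches what step 2 could look like. Only the `OpenAICompatibleProvider` name and the `webscout/Provider/OPENAI` location come from this README; the import path of the base class, the method names, and the wiring of `chat.completions`/`models` are assumptions to verify against `base.py` and an existing provider such as `deepinfra.py`.
+
+ ```python
+ # webscout/Provider/OPENAI/myprovider.py -- hypothetical skeleton
+ from .base import OpenAICompatibleProvider  # exact import path assumed
+
+ class MyProvider(OpenAICompatibleProvider):
+     """Sketch of a new OpenAI-compatible provider."""
+
+     AVAILABLE_MODELS = ["my-model-small", "my-model-large"]  # illustrative names
+
+     def __init__(self, timeout=30):
+         self.timeout = timeout
+         # base.py is expected to expose self.chat.completions.create and
+         # self.models.list on top of the request logic implemented below.
+
+     def _send_request(self, model, messages, stream=False, **kwargs):
+         # 1. Translate `messages` into the upstream service's payload.
+         # 2. POST it (honouring self.timeout) and read the reply.
+         # 3. Wrap the reply in the OpenAI-style objects from utils.py so
+         #    callers get .choices[0].message.content / .delta.content.
+         raise NotImplementedError("sketch only -- follow base.py")
+ ```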
+
+ ## 📚 Related Documentation
+
+ - [OpenAI API Reference](https://platform.openai.com/docs/api-reference)
+ - [DeepInfra Documentation](https://deepinfra.com/docs)
+ - [Glider.so Website](https://glider.so/)
+ - [X0GPT Website](https://x0-gpt.devwtf.in/)
+ - [WiseCat Website](https://wise-cat-groq.vercel.app/)
+ - [ExaAI Website](https://o3minichat.exa.ai/)
+ - [Sambanova Website](https://sambanova.ai/)
+ - [DeepInfra Website](https://deepinfra.com/)
+ - [LLMChatCo Website](https://llmchat.co/)
+ - [Yep.com Website](https://yep.com/)
+ - [HeckAI Website](https://heck.ai/)
+ - [SonusAI Website](https://chat.sonus.ai/)
+ - [Ayle Website](https://ayle.chat/)
+ - [Netwrck Website](https://netwrck.com/)
+ - [StandardInput Website](https://chat.standard-input.com/)
+
+ <div align="center">
+   <a href="https://t.me/PyscoutAI"><img alt="Telegram Group" src="https://img.shields.io/badge/Telegram%20Group-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white"></a>
+   <a href="https://buymeacoffee.com/oevortex"><img alt="Buy Me A Coffee" src="https://img.shields.io/badge/Buy%20Me%20A%20Coffee-FFDD00?style=for-the-badge&logo=buymeacoffee&logoColor=black"></a>
+ </div>