webscout 8.3.6__py3-none-any.whl → 2025.10.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (304)
  1. webscout/AIauto.py +250 -250
  2. webscout/AIbase.py +379 -379
  3. webscout/AIutel.py +60 -58
  4. webscout/Bard.py +1012 -1012
  5. webscout/Bing_search.py +417 -417
  6. webscout/DWEBS.py +529 -529
  7. webscout/Extra/Act.md +309 -309
  8. webscout/Extra/GitToolkit/__init__.py +10 -10
  9. webscout/Extra/GitToolkit/gitapi/README.md +110 -110
  10. webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
  11. webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
  12. webscout/Extra/GitToolkit/gitapi/user.py +96 -96
  13. webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
  14. webscout/Extra/YTToolkit/README.md +375 -375
  15. webscout/Extra/YTToolkit/YTdownloader.py +956 -956
  16. webscout/Extra/YTToolkit/__init__.py +2 -2
  17. webscout/Extra/YTToolkit/transcriber.py +475 -475
  18. webscout/Extra/YTToolkit/ytapi/README.md +44 -44
  19. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
  20. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  21. webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
  22. webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
  23. webscout/Extra/YTToolkit/ytapi/https.py +88 -88
  24. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
  25. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  26. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  27. webscout/Extra/YTToolkit/ytapi/query.py +39 -39
  28. webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
  29. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  30. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  31. webscout/Extra/autocoder/__init__.py +9 -9
  32. webscout/Extra/autocoder/autocoder.py +1105 -1105
  33. webscout/Extra/autocoder/autocoder_utiles.py +332 -332
  34. webscout/Extra/gguf.md +429 -429
  35. webscout/Extra/gguf.py +1213 -1213
  36. webscout/Extra/tempmail/README.md +487 -487
  37. webscout/Extra/tempmail/__init__.py +27 -27
  38. webscout/Extra/tempmail/async_utils.py +140 -140
  39. webscout/Extra/tempmail/base.py +160 -160
  40. webscout/Extra/tempmail/cli.py +186 -186
  41. webscout/Extra/tempmail/emailnator.py +84 -84
  42. webscout/Extra/tempmail/mail_tm.py +360 -360
  43. webscout/Extra/tempmail/temp_mail_io.py +291 -291
  44. webscout/Extra/weather.md +281 -281
  45. webscout/Extra/weather.py +193 -193
  46. webscout/Litlogger/README.md +10 -10
  47. webscout/Litlogger/__init__.py +15 -15
  48. webscout/Litlogger/formats.py +13 -13
  49. webscout/Litlogger/handlers.py +121 -121
  50. webscout/Litlogger/levels.py +13 -13
  51. webscout/Litlogger/logger.py +134 -134
  52. webscout/Provider/AISEARCH/Perplexity.py +332 -332
  53. webscout/Provider/AISEARCH/README.md +279 -279
  54. webscout/Provider/AISEARCH/__init__.py +33 -11
  55. webscout/Provider/AISEARCH/felo_search.py +206 -206
  56. webscout/Provider/AISEARCH/genspark_search.py +323 -323
  57. webscout/Provider/AISEARCH/hika_search.py +185 -185
  58. webscout/Provider/AISEARCH/iask_search.py +410 -410
  59. webscout/Provider/AISEARCH/monica_search.py +219 -219
  60. webscout/Provider/AISEARCH/scira_search.py +316 -314
  61. webscout/Provider/AISEARCH/stellar_search.py +177 -177
  62. webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
  63. webscout/Provider/Aitopia.py +314 -315
  64. webscout/Provider/Andi.py +3 -3
  65. webscout/Provider/Apriel.py +306 -0
  66. webscout/Provider/ChatGPTClone.py +236 -236
  67. webscout/Provider/ChatSandbox.py +343 -342
  68. webscout/Provider/Cloudflare.py +324 -324
  69. webscout/Provider/Cohere.py +208 -207
  70. webscout/Provider/Deepinfra.py +370 -369
  71. webscout/Provider/ExaAI.py +260 -260
  72. webscout/Provider/ExaChat.py +308 -387
  73. webscout/Provider/Flowith.py +221 -221
  74. webscout/Provider/GMI.py +293 -0
  75. webscout/Provider/Gemini.py +164 -162
  76. webscout/Provider/GeminiProxy.py +167 -166
  77. webscout/Provider/GithubChat.py +371 -370
  78. webscout/Provider/Groq.py +800 -800
  79. webscout/Provider/HeckAI.py +383 -379
  80. webscout/Provider/Jadve.py +282 -297
  81. webscout/Provider/K2Think.py +308 -0
  82. webscout/Provider/Koboldai.py +206 -384
  83. webscout/Provider/LambdaChat.py +423 -425
  84. webscout/Provider/Nemotron.py +244 -245
  85. webscout/Provider/Netwrck.py +248 -247
  86. webscout/Provider/OLLAMA.py +395 -394
  87. webscout/Provider/OPENAI/Cloudflare.py +394 -395
  88. webscout/Provider/OPENAI/FalconH1.py +452 -457
  89. webscout/Provider/OPENAI/FreeGemini.py +297 -299
  90. webscout/Provider/OPENAI/{monochat.py → K2Think.py} +432 -329
  91. webscout/Provider/OPENAI/NEMOTRON.py +241 -244
  92. webscout/Provider/OPENAI/PI.py +428 -427
  93. webscout/Provider/OPENAI/README.md +959 -959
  94. webscout/Provider/OPENAI/TogetherAI.py +345 -345
  95. webscout/Provider/OPENAI/TwoAI.py +466 -467
  96. webscout/Provider/OPENAI/__init__.py +33 -59
  97. webscout/Provider/OPENAI/ai4chat.py +313 -303
  98. webscout/Provider/OPENAI/base.py +249 -269
  99. webscout/Provider/OPENAI/chatglm.py +528 -0
  100. webscout/Provider/OPENAI/chatgpt.py +593 -588
  101. webscout/Provider/OPENAI/chatgptclone.py +521 -524
  102. webscout/Provider/OPENAI/chatsandbox.py +202 -177
  103. webscout/Provider/OPENAI/deepinfra.py +319 -315
  104. webscout/Provider/OPENAI/e2b.py +1665 -1665
  105. webscout/Provider/OPENAI/exaai.py +420 -420
  106. webscout/Provider/OPENAI/exachat.py +452 -452
  107. webscout/Provider/OPENAI/friendli.py +232 -232
  108. webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
  109. webscout/Provider/OPENAI/groq.py +364 -364
  110. webscout/Provider/OPENAI/heckai.py +314 -311
  111. webscout/Provider/OPENAI/llmchatco.py +337 -337
  112. webscout/Provider/OPENAI/netwrck.py +355 -354
  113. webscout/Provider/OPENAI/oivscode.py +290 -290
  114. webscout/Provider/OPENAI/opkfc.py +518 -518
  115. webscout/Provider/OPENAI/pydantic_imports.py +1 -1
  116. webscout/Provider/OPENAI/scirachat.py +535 -529
  117. webscout/Provider/OPENAI/sonus.py +308 -308
  118. webscout/Provider/OPENAI/standardinput.py +442 -442
  119. webscout/Provider/OPENAI/textpollinations.py +340 -348
  120. webscout/Provider/OPENAI/toolbaz.py +419 -413
  121. webscout/Provider/OPENAI/typefully.py +362 -362
  122. webscout/Provider/OPENAI/utils.py +295 -295
  123. webscout/Provider/OPENAI/venice.py +436 -436
  124. webscout/Provider/OPENAI/wisecat.py +387 -387
  125. webscout/Provider/OPENAI/writecream.py +166 -166
  126. webscout/Provider/OPENAI/x0gpt.py +378 -378
  127. webscout/Provider/OPENAI/yep.py +389 -389
  128. webscout/Provider/OpenGPT.py +230 -230
  129. webscout/Provider/Openai.py +244 -496
  130. webscout/Provider/PI.py +405 -404
  131. webscout/Provider/Perplexitylabs.py +430 -431
  132. webscout/Provider/QwenLM.py +272 -254
  133. webscout/Provider/STT/__init__.py +32 -2
  134. webscout/Provider/{Llama3.py → Sambanova.py} +257 -258
  135. webscout/Provider/StandardInput.py +309 -309
  136. webscout/Provider/TTI/README.md +82 -82
  137. webscout/Provider/TTI/__init__.py +33 -12
  138. webscout/Provider/TTI/aiarta.py +413 -413
  139. webscout/Provider/TTI/base.py +136 -136
  140. webscout/Provider/TTI/bing.py +243 -243
  141. webscout/Provider/TTI/gpt1image.py +149 -149
  142. webscout/Provider/TTI/imagen.py +196 -196
  143. webscout/Provider/TTI/infip.py +211 -211
  144. webscout/Provider/TTI/magicstudio.py +232 -232
  145. webscout/Provider/TTI/monochat.py +219 -219
  146. webscout/Provider/TTI/piclumen.py +214 -214
  147. webscout/Provider/TTI/pixelmuse.py +232 -232
  148. webscout/Provider/TTI/pollinations.py +232 -232
  149. webscout/Provider/TTI/together.py +288 -288
  150. webscout/Provider/TTI/utils.py +12 -12
  151. webscout/Provider/TTI/venice.py +367 -367
  152. webscout/Provider/TTS/README.md +192 -192
  153. webscout/Provider/TTS/__init__.py +33 -10
  154. webscout/Provider/TTS/parler.py +110 -110
  155. webscout/Provider/TTS/streamElements.py +333 -333
  156. webscout/Provider/TTS/utils.py +280 -280
  157. webscout/Provider/TeachAnything.py +237 -236
  158. webscout/Provider/TextPollinationsAI.py +311 -318
  159. webscout/Provider/TogetherAI.py +356 -357
  160. webscout/Provider/TwoAI.py +313 -569
  161. webscout/Provider/TypliAI.py +312 -311
  162. webscout/Provider/UNFINISHED/ChatHub.py +208 -208
  163. webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
  164. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +294 -294
  165. webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +198 -198
  166. webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +477 -477
  167. webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
  168. webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +324 -324
  169. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  170. webscout/Provider/UNFINISHED/liner.py +334 -0
  171. webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
  172. webscout/Provider/UNFINISHED/puterjs.py +634 -634
  173. webscout/Provider/UNFINISHED/samurai.py +223 -223
  174. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  175. webscout/Provider/Venice.py +251 -250
  176. webscout/Provider/VercelAI.py +256 -255
  177. webscout/Provider/WiseCat.py +232 -231
  178. webscout/Provider/WrDoChat.py +367 -366
  179. webscout/Provider/__init__.py +33 -86
  180. webscout/Provider/ai4chat.py +174 -174
  181. webscout/Provider/akashgpt.py +331 -334
  182. webscout/Provider/cerebras.py +446 -340
  183. webscout/Provider/chatglm.py +394 -214
  184. webscout/Provider/cleeai.py +211 -212
  185. webscout/Provider/deepseek_assistant.py +1 -1
  186. webscout/Provider/elmo.py +282 -282
  187. webscout/Provider/geminiapi.py +208 -208
  188. webscout/Provider/granite.py +261 -261
  189. webscout/Provider/hermes.py +263 -265
  190. webscout/Provider/julius.py +223 -222
  191. webscout/Provider/learnfastai.py +309 -309
  192. webscout/Provider/llama3mitril.py +214 -214
  193. webscout/Provider/llmchat.py +243 -243
  194. webscout/Provider/llmchatco.py +290 -290
  195. webscout/Provider/meta.py +801 -801
  196. webscout/Provider/oivscode.py +309 -309
  197. webscout/Provider/scira_chat.py +384 -457
  198. webscout/Provider/searchchat.py +292 -291
  199. webscout/Provider/sonus.py +258 -258
  200. webscout/Provider/toolbaz.py +370 -364
  201. webscout/Provider/turboseek.py +274 -265
  202. webscout/Provider/typefully.py +208 -207
  203. webscout/Provider/x0gpt.py +1 -0
  204. webscout/Provider/yep.py +372 -371
  205. webscout/__init__.py +30 -31
  206. webscout/__main__.py +5 -5
  207. webscout/auth/api_key_manager.py +189 -189
  208. webscout/auth/config.py +175 -175
  209. webscout/auth/models.py +185 -185
  210. webscout/auth/routes.py +664 -664
  211. webscout/auth/simple_logger.py +236 -236
  212. webscout/cli.py +523 -523
  213. webscout/conversation.py +438 -438
  214. webscout/exceptions.py +361 -361
  215. webscout/litagent/Readme.md +298 -298
  216. webscout/litagent/__init__.py +28 -28
  217. webscout/litagent/agent.py +581 -581
  218. webscout/litagent/constants.py +59 -59
  219. webscout/litprinter/__init__.py +58 -58
  220. webscout/models.py +181 -181
  221. webscout/optimizers.py +419 -419
  222. webscout/prompt_manager.py +288 -288
  223. webscout/sanitize.py +1078 -1078
  224. webscout/scout/README.md +401 -401
  225. webscout/scout/__init__.py +8 -8
  226. webscout/scout/core/__init__.py +6 -6
  227. webscout/scout/core/crawler.py +297 -297
  228. webscout/scout/core/scout.py +706 -706
  229. webscout/scout/core/search_result.py +95 -95
  230. webscout/scout/core/text_analyzer.py +62 -62
  231. webscout/scout/core/text_utils.py +277 -277
  232. webscout/scout/core/web_analyzer.py +51 -51
  233. webscout/scout/element.py +599 -599
  234. webscout/scout/parsers/__init__.py +69 -69
  235. webscout/scout/parsers/html5lib_parser.py +172 -172
  236. webscout/scout/parsers/html_parser.py +236 -236
  237. webscout/scout/parsers/lxml_parser.py +178 -178
  238. webscout/scout/utils.py +37 -37
  239. webscout/swiftcli/Readme.md +323 -323
  240. webscout/swiftcli/__init__.py +95 -95
  241. webscout/swiftcli/core/__init__.py +7 -7
  242. webscout/swiftcli/core/cli.py +308 -308
  243. webscout/swiftcli/core/context.py +104 -104
  244. webscout/swiftcli/core/group.py +241 -241
  245. webscout/swiftcli/decorators/__init__.py +28 -28
  246. webscout/swiftcli/decorators/command.py +221 -221
  247. webscout/swiftcli/decorators/options.py +220 -220
  248. webscout/swiftcli/decorators/output.py +302 -302
  249. webscout/swiftcli/exceptions.py +21 -21
  250. webscout/swiftcli/plugins/__init__.py +9 -9
  251. webscout/swiftcli/plugins/base.py +135 -135
  252. webscout/swiftcli/plugins/manager.py +269 -269
  253. webscout/swiftcli/utils/__init__.py +59 -59
  254. webscout/swiftcli/utils/formatting.py +252 -252
  255. webscout/swiftcli/utils/parsing.py +267 -267
  256. webscout/update_checker.py +117 -117
  257. webscout/version.py +1 -1
  258. webscout/webscout_search.py +1183 -1183
  259. webscout/webscout_search_async.py +649 -649
  260. webscout/yep_search.py +346 -346
  261. webscout/zeroart/README.md +89 -89
  262. webscout/zeroart/__init__.py +134 -134
  263. webscout/zeroart/base.py +66 -66
  264. webscout/zeroart/effects.py +100 -100
  265. webscout/zeroart/fonts.py +1238 -1238
  266. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -936
  267. webscout-2025.10.11.dist-info/RECORD +300 -0
  268. webscout/Provider/AISEARCH/DeepFind.py +0 -254
  269. webscout/Provider/AllenAI.py +0 -440
  270. webscout/Provider/Blackboxai.py +0 -793
  271. webscout/Provider/FreeGemini.py +0 -250
  272. webscout/Provider/GptOss.py +0 -207
  273. webscout/Provider/Hunyuan.py +0 -283
  274. webscout/Provider/Kimi.py +0 -445
  275. webscout/Provider/MCPCore.py +0 -322
  276. webscout/Provider/MiniMax.py +0 -207
  277. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
  278. webscout/Provider/OPENAI/MiniMax.py +0 -298
  279. webscout/Provider/OPENAI/Qwen3.py +0 -304
  280. webscout/Provider/OPENAI/autoproxy.py +0 -1067
  281. webscout/Provider/OPENAI/copilot.py +0 -321
  282. webscout/Provider/OPENAI/gptoss.py +0 -288
  283. webscout/Provider/OPENAI/kimi.py +0 -469
  284. webscout/Provider/OPENAI/mcpcore.py +0 -431
  285. webscout/Provider/OPENAI/multichat.py +0 -378
  286. webscout/Provider/OPENAI/qodo.py +0 -630
  287. webscout/Provider/OPENAI/xenai.py +0 -514
  288. webscout/Provider/Reka.py +0 -214
  289. webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
  290. webscout/Provider/asksteve.py +0 -220
  291. webscout/Provider/copilot.py +0 -441
  292. webscout/Provider/freeaichat.py +0 -294
  293. webscout/Provider/koala.py +0 -182
  294. webscout/Provider/lmarena.py +0 -198
  295. webscout/Provider/monochat.py +0 -275
  296. webscout/Provider/multichat.py +0 -375
  297. webscout/Provider/scnet.py +0 -244
  298. webscout/Provider/talkai.py +0 -194
  299. webscout/tempid.py +0 -128
  300. webscout-8.3.6.dist-info/RECORD +0 -327
  301. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
  302. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
  303. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
  304. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/README.md
@@ -1,959 +1,959 @@
<div align="center">
<a href="https://github.com/OEvortex/Webscout">
<img src="https://img.shields.io/badge/WebScout-OpenAI%20Compatible%20Providers-4285F4?style=for-the-badge&logo=openai&logoColor=white" alt="WebScout OpenAI Compatible Providers">
</a>
<br/>
<h1>WebScout OpenAI-Compatible Providers</h1>
<p><strong>Seamlessly integrate with various AI providers using OpenAI-compatible interfaces</strong></p>

<p>
<img src="https://img.shields.io/badge/Python-3.7+-3776AB?style=flat-square&logo=python&logoColor=white" alt="Python 3.7+">
<img src="https://img.shields.io/badge/License-MIT-green?style=flat-square" alt="License: MIT">
<img src="https://img.shields.io/badge/PRs-Welcome-brightgreen?style=flat-square" alt="PRs Welcome">
</p>

<p>
Access multiple AI providers through a standardized OpenAI-compatible interface, making it easy to switch between providers without changing your code.
</p>
</div>

## 🚀 Overview

The WebScout OpenAI-Compatible Providers module offers a standardized way to interact with various AI providers using the familiar OpenAI API structure. This makes it easy to:

- Use the same code structure across different AI providers
- Switch between providers without major code changes (see the sketch below)
- Leverage the OpenAI ecosystem of tools and libraries with alternative AI providers

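Because every client exposes the same `chat.completions.create` call, moving between providers mostly comes down to changing which class you instantiate. A minimal sketch, using the default model names from the examples later in this README:

```python
from webscout.client import DeepInfra, Glider

def ask(client, model: str) -> str:
    # The same OpenAI-style call shape works for any provider client.
    response = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": "Tell me about Python programming."}],
    )
    return response.choices[0].message.content

# Swapping providers does not change the calling code.
print(ask(DeepInfra(), "meta-llama/Meta-Llama-3.1-8B-Instruct"))
print(ask(Glider(), "chat-llama-3-1-70b"))
```
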
## ⚙️ Available Providers

Currently, the following providers are implemented with OpenAI-compatible interfaces:

- DeepInfra
- Glider
- ChatGPTClone
- X0GPT
- WiseCat
- Venice
- ExaAI
- TypeGPT
- SciraChat
- LLMChatCo
- YEPCHAT
- HeckAI
- SonusAI
- ExaChat
- Netwrck
- StandardInput
- Writecream
- toolbaz
- UncovrAI
- OPKFC
- TextPollinations
- E2B
- MultiChatAI
- AI4Chat
- MCPCore
- TypefullyAI
- Flowith
- ChatSandbox
- Cloudflare
- NEMOTRON
- BLACKBOXAI
- Copilot
- TwoAI
- oivscode
- Qwen3
- TogetherAI
- PiAI
- FalconH1
- XenAI
- GeminiProxy
- MonoChat
- Friendli
- MiniMax
- QodoAI
- Kimi
- GptOss

## 💻 Usage Examples

Here are examples of how to use the OpenAI-compatible providers in your code.

### Basic Usage with DeepInfra

```python
from webscout.client import DeepInfra

# Initialize the client
client = DeepInfra()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="meta-llama/Meta-Llama-3.1-8B-Instruct",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ],
    temperature=0.7,
    max_tokens=500
)

# Print the response
print(response.choices[0].message.content)
```

### Basic Usage with Glider

```python
from webscout.client import Glider

# Initialize the client
client = Glider()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="chat-llama-3-1-70b",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ],
    max_tokens=500
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming Responses (Example with DeepInfra)

```python
from webscout.client import DeepInfra

# Initialize the client
client = DeepInfra()

# Create a streaming completion
stream = client.chat.completions.create(
    model="meta-llama/Meta-Llama-3.1-8B-Instruct",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True,
    temperature=0.7
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print() # Add a newline at the end
```

### Streaming with Glider

```python
from webscout.client import Glider

# Initialize the client
client = Glider()

# Create a streaming completion
stream = client.chat.completions.create(
    model="chat-llama-3-1-70b",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print() # Add a newline at the end
```

### Basic Usage with ChatGPTClone

```python
from webscout.client import ChatGPTClone

# Initialize the client
client = ChatGPTClone()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="gpt-4",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ],
    temperature=0.7
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with ChatGPTClone

```python
from webscout.client import ChatGPTClone

# Initialize the client
client = ChatGPTClone()

# Create a streaming completion
stream = client.chat.completions.create(
    model="gpt-4",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print() # Add a newline at the end
```

### Basic Usage with X0GPT

```python
from webscout.client import X0GPT

# Initialize the client
client = X0GPT()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="gpt-4", # Model name doesn't matter for X0GPT
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with X0GPT

```python
from webscout.client import X0GPT

# Initialize the client
client = X0GPT()

# Create a streaming completion
stream = client.chat.completions.create(
    model="gpt-4", # Model name doesn't matter for X0GPT
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print() # Add a newline at the end
```

### Basic Usage with WiseCat

```python
from webscout.client import WiseCat

# Initialize the client
client = WiseCat()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="chat-model-small",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with WiseCat

```python
from webscout.client import WiseCat

# Initialize the client
client = WiseCat()

# Create a streaming completion
stream = client.chat.completions.create(
    model="chat-model-small",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print() # Add a newline at the end
```

### Basic Usage with Venice

```python
from webscout.client import Venice

# Initialize the client
client = Venice(temperature=0.7, top_p=0.9)

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="mistral-31-24b",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with Venice

```python
from webscout.client import Venice

# Initialize the client
client = Venice()

# Create a streaming completion
stream = client.chat.completions.create(
    model="mistral-31-24b",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print() # Add a newline at the end
```

### Basic Usage with ExaAI

```python
from webscout.client import ExaAI

# Initialize the client
client = ExaAI()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="O3-Mini",
    messages=[
        # Note: ExaAI does not support system messages (they will be removed)
        {"role": "user", "content": "Hello!"},
        {"role": "assistant", "content": "Hi there! How can I help you today?"},
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Basic Usage with HeckAI

```python
from webscout.client import HeckAI

# Initialize the client
client = HeckAI(language="English")

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="google/gemini-2.0-flash-001",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with HeckAI

```python
from webscout.client import HeckAI

# Initialize the client
client = HeckAI()

# Create a streaming completion
stream = client.chat.completions.create(
    model="google/gemini-2.0-flash-001",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print() # Add a newline at the end
```

### Streaming with ExaAI

```python
from webscout.client import ExaAI

# Initialize the client
client = ExaAI()

# Create a streaming completion
stream = client.chat.completions.create(
    model="O3-Mini",
    messages=[
        # Note: ExaAI does not support system messages (they will be removed)
        {"role": "user", "content": "Hello!"},
        {"role": "assistant", "content": "Hi there! How can I help you today?"},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print() # Add a newline at the end
```

### Basic Usage with TypeGPT

```python
from webscout.client import TypeGPT

# Initialize the client
client = TypeGPT()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="chatgpt-4o-latest",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with TypeGPT

```python
from webscout.client import TypeGPT

# Initialize the client
client = TypeGPT()

# Create a streaming completion
stream = client.chat.completions.create(
    model="chatgpt-4o-latest",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print() # Add a newline at the end
```

### Basic Usage with SciraChat

```python
from webscout.client import SciraChat

# Initialize the client
client = SciraChat()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="scira-default",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with SciraChat

```python
from webscout.client import SciraChat

# Initialize the client
client = SciraChat()

# Create a streaming completion
stream = client.chat.completions.create(
    model="scira-default",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print() # Add a newline at the end
```

### Basic Usage with FreeAIChat

```python
from webscout.client import FreeAIChat

# Initialize the client
client = FreeAIChat()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="GPT 4o",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with FreeAIChat

```python
from webscout.client import FreeAIChat

# Initialize the client
client = FreeAIChat()

# Create a streaming completion
stream = client.chat.completions.create(
    model="GPT 4o",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print() # Add a newline at the end
```

### Basic Usage with LLMChatCo

```python
from webscout.client import LLMChatCo

# Initialize the client
client = LLMChatCo()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="gemini-flash-2.0", # Default model
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ],
    temperature=0.7
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with LLMChatCo

```python
from webscout.client import LLMChatCo

# Initialize the client
client = LLMChatCo()

# Create a streaming completion
stream = client.chat.completions.create(
    model="gemini-flash-2.0",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print() # Add a newline at the end
```

### Basic Usage with YEPCHAT

```python
from webscout.client import YEPCHAT

# Initialize the client
client = YEPCHAT()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="DeepSeek-R1-Distill-Qwen-32B",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ],
    temperature=0.7
)

# Print the response
print(response.choices[0].message.content)
```

### Basic Usage with SonusAI

```python
from webscout.client import SonusAI

# Initialize the client
client = SonusAI()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="pro", # Choose from 'pro', 'air', or 'mini'
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ],
    reasoning=True # Optional: Enable reasoning mode
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with YEPCHAT

```python
from webscout.client import YEPCHAT

# Initialize the client
client = YEPCHAT()

# Create a streaming completion
stream = client.chat.completions.create(
    model="Mixtral-8x7B-Instruct-v0.1",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print() # Add a newline at the end
```

### Streaming with SonusAI

```python
from webscout.client import SonusAI

# Initialize the client
client = SonusAI(timeout=60)

# Create a streaming completion
stream = client.chat.completions.create(
    model="air",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print() # Add a newline at the end
```

### Basic Usage with ExaChat

```python
from webscout.client import ExaChat

# Initialize the client
client = ExaChat()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="exaanswer", # Choose from many available models
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Using Different ExaChat Providers

```python
from webscout.client import ExaChat

# Initialize the client
client = ExaChat(timeout=60)

# Use a Gemini model
gemini_response = client.chat.completions.create(
    model="gemini-2.0-flash",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Explain quantum computing in simple terms."}
    ]
)

# Use a Groq model
groq_response = client.chat.completions.create(
    model="llama-3.1-8b-instant",
    messages=[
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print the responses
print(gemini_response.choices[0].message.content)
print(groq_response.choices[0].message.content)
```

### Streaming with Netwrck

```python
from webscout.client import Netwrck

# Initialize the client
client = Netwrck(timeout=60)

# Create a streaming completion
stream = client.chat.completions.create(
    model="openai/gpt-4o-mini",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print() # Add a newline at the end
```

### Basic Usage with StandardInput

```python
from webscout.client import StandardInput

# Initialize the client
client = StandardInput()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="standard-quick",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with StandardInput

```python
from webscout.client import StandardInput

# Initialize the client
client = StandardInput()

# Create a streaming completion
stream = client.chat.completions.create(
    model="standard-reasoning",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Count from 1 to 5."}
    ],
    stream=True,
    enable_reasoning=True # Enable reasoning capabilities
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print() # Add a newline at the end
```

## 🔄 Response Format

All providers return responses that mimic the OpenAI API structure, ensuring compatibility with tools built for OpenAI.

### 📝 Non-streaming Response

```json
{
  "id": "chatcmpl-123abc",
  "object": "chat.completion",
  "created": 1677858242,
  "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
  "usage": {
    "prompt_tokens": 13,
    "completion_tokens": 7,
    "total_tokens": 20
  },
  "choices": [
    {
      "message": {
        "role": "assistant",
        "content": "This is a response from the model."
      },
      "finish_reason": "stop",
      "index": 0
    }
  ]
}
```
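
Because the Python response objects mirror this JSON shape, the same fields are available as attributes. A small illustration, continuing any of the non-streaming examples above (whether `usage` is populated depends on the provider, so treat that part as an assumption):

```python
print(response.id)                        # e.g. "chatcmpl-123abc"
print(response.choices[0].finish_reason)  # e.g. "stop"
if response.usage:                        # usage may be absent for some providers
    print(response.usage.total_tokens)    # e.g. 20
```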

### 📱 Streaming Response Chunks

```json
{
  "id": "chatcmpl-123abc",
  "object": "chat.completion.chunk",
  "created": 1677858242,
  "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
  "choices": [
    {
      "delta": {
        "content": "This "
      },
      "finish_reason": null,
      "index": 0
    }
  ]
}
```

## 🧩 Architecture

The OpenAI-compatible providers are built on a modular architecture:

- `base.py`: Contains abstract base classes that define the OpenAI-compatible interface
- `utils.py`: Provides data structures that mimic OpenAI's response format
- Provider-specific implementations (e.g., `deepinfra.py`): Implement the abstract interfaces for specific providers

This architecture makes it easy to add new providers while maintaining a consistent interface.
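
To make the layering concrete, here is a hypothetical, self-contained skeleton of what a provider looks like from the outside. The nested `chat.completions.create` shape matches the examples in this README, but the real base-class and response helpers live in `base.py` and `utils.py`, so everything below is illustrative rather than the actual interface:

```python
from dataclasses import dataclass, field
from typing import List

# Simplified stand-ins for the response structures provided by utils.py.
@dataclass
class Message:
    role: str
    content: str

@dataclass
class Choice:
    message: Message
    finish_reason: str = "stop"
    index: int = 0

@dataclass
class ChatCompletion:
    id: str
    model: str
    choices: List[Choice] = field(default_factory=list)
    object: str = "chat.completion"

class Completions:
    def create(self, *, model: str, messages: list, **kwargs) -> ChatCompletion:
        # A real provider would call its backend API here; this stub just echoes.
        reply = f"echo: {messages[-1]['content']}"
        return ChatCompletion(
            id="chatcmpl-demo",
            model=model,
            choices=[Choice(message=Message(role="assistant", content=reply))],
        )

class Chat:
    def __init__(self) -> None:
        self.completions = Completions()

class MyProvider:
    # In webscout this class would subclass OpenAICompatibleProvider from base.py.
    def __init__(self) -> None:
        self.chat = Chat()

client = MyProvider()
print(client.chat.completions.create(
    model="demo",
    messages=[{"role": "user", "content": "hi"}],
).choices[0].message.content)
```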

## 📝 Notes

- Some providers may require API keys for full functionality (see the sketch below)
- Not all OpenAI features are supported by all providers
- Response formats are standardized to match OpenAI's format, but the underlying content depends on the specific provider and model

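For providers that need a key, the usual pattern is to read it from the environment and pass it at construction time. The parameter name below is hypothetical; whether a given client accepts an `api_key` argument (and what it is called) depends on that provider's implementation:

```python
import os

from webscout.client import DeepInfra

# Hypothetical: check the provider's signature for the real parameter name.
client = DeepInfra(api_key=os.environ.get("DEEPINFRA_API_KEY"))
```
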
## 🤝 Contributing

Want to add a new OpenAI-compatible provider? Follow these steps:

1. Create a new file in the `webscout/Provider/OPENAI` directory
2. Implement the `OpenAICompatibleProvider` interface
3. Add appropriate tests
4. Update this README with information about the new provider

## 📚 Related Documentation

- [OpenAI API Reference](https://platform.openai.com/docs/api-reference)
- [DeepInfra Documentation](https://deepinfra.com/docs)
- [Glider.so Website](https://glider.so/)
- [ChatGPT Clone Website](https://chatgpt-clone-ten-nu.vercel.app/)
- [X0GPT Website](https://x0-gpt.devwtf.in/)
- [WiseCat Website](https://wise-cat-groq.vercel.app/)
- [Venice AI Website](https://venice.ai/)
- [ExaAI Website](https://o3minichat.exa.ai/)
- [TypeGPT Website](https://chat.typegpt.net/)
- [SciraChat Website](https://scira.ai/)
- [FreeAIChat Website](https://freeaichatplayground.com/)
- [LLMChatCo Website](https://llmchat.co/)
- [Yep.com Website](https://yep.com/)
- [HeckAI Website](https://heck.ai/)
- [SonusAI Website](https://chat.sonus.ai/)
- [ExaChat Website](https://exa-chat.vercel.app/)
- [Netwrck Website](https://netwrck.com/)
- [StandardInput Website](https://chat.standard-input.com/)

<div align="center">
<a href="https://t.me/PyscoutAI"><img alt="Telegram Group" src="https://img.shields.io/badge/Telegram%20Group-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white"></a>
<a href="https://buymeacoffee.com/oevortex"><img alt="Buy Me A Coffee" src="https://img.shields.io/badge/Buy%20Me%20A%20Coffee-FFDD00?style=for-the-badge&logo=buymeacoffee&logoColor=black"></a>
</div>
1
+ <div align="center">
2
+ <a href="https://github.com/OEvortex/Webscout">
3
+ <img src="https://img.shields.io/badge/WebScout-OpenAI%20Compatible%20Providers-4285F4?style=for-the-badge&logo=openai&logoColor=white" alt="WebScout OpenAI Compatible Providers">
4
+ </a>
5
+ <br/>
6
+ <h1>WebScout OpenAI-Compatible Providers</h1>
7
+ <p><strong>Seamlessly integrate with various AI providers using OpenAI-compatible interfaces</strong></p>
8
+
9
+ <p>
10
+ <img src="https://img.shields.io/badge/Python-3.7+-3776AB?style=flat-square&logo=python&logoColor=white" alt="Python 3.7+">
11
+ <img src="https://img.shields.io/badge/License-MIT-green?style=flat-square" alt="License: MIT">
12
+ <img src="https://img.shields.io/badge/PRs-Welcome-brightgreen?style=flat-square" alt="PRs Welcome">
13
+ </p>
14
+
15
+ <p>
16
+ Access multiple AI providers through a standardized OpenAI-compatible interface, making it easy to switch between providers without changing your code.
17
+ </p>
18
+ </div>
19
+
20
+ ## 🚀 Overview
21
+
22
+ The WebScout OpenAI-Compatible Providers module offers a standardized way to interact with various AI providers using the familiar OpenAI API structure. This makes it easy to:
23
+
24
+ - Use the same code structure across different AI providers
25
+ - Switch between providers without major code changes
26
+ - Leverage the OpenAI ecosystem of tools and libraries with alternative AI providers
27
+
28
+ ## ⚙️ Available Providers
29
+
30
+ Currently, the following providers are implemented with OpenAI-compatible interfaces:
31
+
32
+ - DeepInfra
33
+ - Glider
34
+ - ChatGPTClone
35
+ - X0GPT
36
+ - WiseCat
37
+ - Venice
38
+ - ExaAI
39
+ - TypeGPT
40
+ - SciraChat
41
+ - LLMChatCo
42
+ - YEPCHAT
43
+ - HeckAI
44
+ - SonusAI
45
+ - ExaChat
46
+ - Netwrck
47
+ - StandardInput
48
+ - Writecream
49
+ - toolbaz
50
+ - UncovrAI
51
+ - OPKFC
52
+ - TextPollinations
53
+ - E2B
54
+ - MultiChatAI
55
+ - AI4Chat
56
+ - MCPCore
57
+ - TypefullyAI
58
+ - Flowith
59
+ - ChatSandbox
60
+ - Cloudflare
61
+ - NEMOTRON
62
+ - BLACKBOXAI
63
+ - Copilot
64
+ - TwoAI
65
+ - oivscode
66
+ - Qwen3
67
+ - TogetherAI
68
+ - PiAI
69
+ - FalconH1
70
+ - XenAI
71
+ - GeminiProxy
72
+ - MonoChat
73
+ - Friendli
74
+ - MiniMax
75
+ - QodoAI
76
+ - Kimi
77
+ - GptOss
78
+ ## 💻 Usage Examples
79
+
80
+ Here are examples of how to use the OpenAI-compatible providers in your code.
81
+
82
+ ### Basic Usage with DeepInfra
83
+
84
+ ```python
85
+ from webscout.client import DeepInfra
86
+
87
+ # Initialize the client
88
+ client = DeepInfra()
89
+
90
+ # Create a completion (non-streaming)
91
+ response = client.chat.completions.create(
92
+ model="meta-llama/Meta-Llama-3.1-8B-Instruct",
93
+ messages=[
94
+ {"role": "system", "content": "You are a helpful assistant."},
95
+ {"role": "user", "content": "Tell me about Python programming."}
96
+ ],
97
+ temperature=0.7,
98
+ max_tokens=500
99
+ )
100
+
101
+ # Print the response
102
+ print(response.choices[0].message.content)
103
+ ```
104
+
105
+ ### Basic Usage with Glider
106
+
107
+ ```python
108
+ from webscout.client import Glider
109
+
110
+ # Initialize the client
111
+ client = Glider()
112
+
113
+ # Create a completion (non-streaming)
114
+ response = client.chat.completions.create(
115
+ model="chat-llama-3-1-70b",
116
+ messages=[
117
+ {"role": "system", "content": "You are a helpful assistant."},
118
+ {"role": "user", "content": "Tell me about Python programming."}
119
+ ],
120
+ max_tokens=500
121
+ )
122
+
123
+ # Print the response
124
+ print(response.choices[0].message.content)
125
+ ```
126
+
127
+ ### Streaming Responses (Example with DeepInfra)
128
+
129
+ ```python
130
+ from webscout.client import DeepInfra
131
+
132
+ # Initialize the client
133
+ client = DeepInfra()
134
+
135
+ # Create a streaming completion
136
+ stream = client.chat.completions.create(
137
+ model="meta-llama/Meta-Llama-3.1-8B-Instruct",
138
+ messages=[
139
+ {"role": "system", "content": "You are a helpful assistant."},
140
+ {"role": "user", "content": "Write a short poem about programming."}
141
+ ],
142
+ stream=True,
143
+ temperature=0.7
144
+ )
145
+
146
+ # Process the streaming response
147
+ for chunk in stream:
148
+ if chunk.choices[0].delta.content:
149
+ print(chunk.choices[0].delta.content, end="", flush=True)
150
+ print() # Add a newline at the end
151
+ ```
152
+
153
+ ### Streaming with Glider
154
+
155
+ ```python
156
+ from webscout.client import Glider
157
+
158
+ # Initialize the client
159
+ client = Glider()
160
+
161
+ # Create a streaming completion
162
+ stream = client.chat.completions.create(
163
+ model="chat-llama-3-1-70b",
164
+ messages=[
165
+ {"role": "system", "content": "You are a helpful assistant."},
166
+ {"role": "user", "content": "Write a short poem about programming."}
167
+ ],
168
+ stream=True
169
+ )
170
+
171
+ # Process the streaming response
172
+ for chunk in stream:
173
+ if chunk.choices[0].delta.content:
174
+ print(chunk.choices[0].delta.content, end="", flush=True)
175
+ print() # Add a newline at the end
176
+ ```
177
+
178
+ ### Basic Usage with ChatGPTClone
179
+
180
+ ```python
181
+ from webscout.client import ChatGPTClone
182
+
183
+ # Initialize the client
184
+ client = ChatGPTClone()
185
+
186
+ # Create a completion (non-streaming)
187
+ response = client.chat.completions.create(
188
+ model="gpt-4",
189
+ messages=[
190
+ {"role": "system", "content": "You are a helpful assistant."},
191
+ {"role": "user", "content": "Tell me about Python programming."}
192
+ ],
193
+ temperature=0.7
194
+ )
195
+
196
+ # Print the response
197
+ print(response.choices[0].message.content)
198
+ ```
199
+
200
+ ### Streaming with ChatGPTClone
201
+
202
+ ```python
203
+ from webscout.client import ChatGPTClone
204
+
205
+ # Initialize the client
206
+ client = ChatGPTClone()
207
+
208
+ # Create a streaming completion
209
+ stream = client.chat.completions.create(
210
+ model="gpt-4",
211
+ messages=[
212
+ {"role": "system", "content": "You are a helpful assistant."},
213
+ {"role": "user", "content": "Write a short poem about programming."}
214
+ ],
215
+ stream=True
216
+ )
217
+
218
+ # Process the streaming response
219
+ for chunk in stream:
220
+ if chunk.choices[0].delta.content:
221
+ print(chunk.choices[0].delta.content, end="", flush=True)
222
+ print() # Add a newline at the end
223
+ ```
224
+
225
+ ### Basic Usage with X0GPT
226
+
227
+ ```python
228
+ from webscout.client import X0GPT
229
+
230
+ # Initialize the client
231
+ client = X0GPT()
232
+
233
+ # Create a completion (non-streaming)
234
+ response = client.chat.completions.create(
235
+ model="gpt-4", # Model name doesn't matter for X0GPT
236
+ messages=[
237
+ {"role": "system", "content": "You are a helpful assistant."},
238
+ {"role": "user", "content": "Tell me about Python programming."}
239
+ ]
240
+ )
241
+
242
+ # Print the response
243
+ print(response.choices[0].message.content)
244
+ ```
245
+
246
+ ### Streaming with X0GPT
247
+
248
+ ```python
249
+ from webscout.client import X0GPT
250
+
251
+ # Initialize the client
252
+ client = X0GPT()
253
+
254
+ # Create a streaming completion
255
+ stream = client.chat.completions.create(
256
+ model="gpt-4", # Model name doesn't matter for X0GPT
257
+ messages=[
258
+ {"role": "system", "content": "You are a helpful assistant."},
259
+ {"role": "user", "content": "Write a short poem about programming."}
260
+ ],
261
+ stream=True
262
+ )
263
+
264
+ # Process the streaming response
265
+ for chunk in stream:
266
+ if chunk.choices[0].delta.content:
267
+ print(chunk.choices[0].delta.content, end="", flush=True)
268
+ print() # Add a newline at the end
269
+ ```
270
+
271
+ ### Basic Usage with WiseCat
272
+
273
+ ```python
274
+ from webscout.client import WiseCat
275
+
276
+ # Initialize the client
277
+ client = WiseCat()
278
+
279
+ # Create a completion (non-streaming)
280
+ response = client.chat.completions.create(
281
+ model="chat-model-small",
282
+ messages=[
283
+ {"role": "system", "content": "You are a helpful assistant."},
284
+ {"role": "user", "content": "Tell me about Python programming."}
285
+ ]
286
+ )
287
+
288
+ # Print the response
289
+ print(response.choices[0].message.content)
290
+ ```
291
+
292
+ ### Streaming with WiseCat
293
+
294
+ ```python
295
+ from webscout.client import WiseCat
296
+
297
+ # Initialize the client
298
+ client = WiseCat()
299
+
300
+ # Create a streaming completion
301
+ stream = client.chat.completions.create(
302
+ model="chat-model-small",
303
+ messages=[
304
+ {"role": "system", "content": "You are a helpful assistant."},
305
+ {"role": "user", "content": "Write a short poem about programming."}
306
+ ],
307
+ stream=True
308
+ )
309
+
310
+ # Process the streaming response
311
+ for chunk in stream:
312
+ if chunk.choices[0].delta.content:
313
+ print(chunk.choices[0].delta.content, end="", flush=True)
314
+ print() # Add a newline at the end
315
+ ```
316
+
317
+ ### Basic Usage with Venice
318
+
319
+ ```python
320
+ from webscout.client import Venice
321
+
322
+ # Initialize the client
323
+ client = Venice(temperature=0.7, top_p=0.9)
324
+
325
+ # Create a completion (non-streaming)
326
+ response = client.chat.completions.create(
327
+ model="mistral-31-24b",
328
+ messages=[
329
+ {"role": "system", "content": "You are a helpful assistant."},
330
+ {"role": "user", "content": "Tell me about Python programming."}
331
+ ]
332
+ )
333
+
334
+ # Print the response
335
+ print(response.choices[0].message.content)
336
+ ```
337
+
338
+ ### Streaming with Venice
339
+
340
+ ```python
341
+ from webscout.client import Venice
342
+
343
+ # Initialize the client
344
+ client = Venice()
345
+
346
+ # Create a streaming completion
347
+ stream = client.chat.completions.create(
348
+ model="mistral-31-24b",
349
+ messages=[
350
+ {"role": "system", "content": "You are a helpful assistant."},
351
+ {"role": "user", "content": "Write a short poem about programming."}
352
+ ],
353
+ stream=True
354
+ )
355
+
356
+ # Process the streaming response
357
+ for chunk in stream:
358
+ if chunk.choices[0].delta.content:
359
+ print(chunk.choices[0].delta.content, end="", flush=True)
360
+ print() # Add a newline at the end
361
+ ```
362
+
+ ### Basic Usage with ExaAI
+
+ ```python
+ from webscout.client import ExaAI
+
+ # Initialize the client
+ client = ExaAI()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="O3-Mini",
+     messages=[
+         # Note: ExaAI does not support system messages (they will be removed)
+         {"role": "user", "content": "Hello!"},
+         {"role": "assistant", "content": "Hi there! How can I help you today?"},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ]
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with ExaAI
+
+ ```python
+ from webscout.client import ExaAI
+
+ # Initialize the client
+ client = ExaAI()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="O3-Mini",
+     messages=[
+         # Note: ExaAI does not support system messages (they will be removed)
+         {"role": "user", "content": "Hello!"},
+         {"role": "assistant", "content": "Hi there! How can I help you today?"},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print() # Add a newline at the end
+ ```
+
+ ### Basic Usage with HeckAI
+
+ ```python
+ from webscout.client import HeckAI
+
+ # Initialize the client
+ client = HeckAI(language="English")
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="google/gemini-2.0-flash-001",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ]
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with HeckAI
+
+ ```python
+ from webscout.client import HeckAI
+
+ # Initialize the client
+ client = HeckAI()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="google/gemini-2.0-flash-001",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print() # Add a newline at the end
+ ```
+
+ ### Basic Usage with TypeGPT
+
+ ```python
+ from webscout.client import TypeGPT
+
+ # Initialize the client
+ client = TypeGPT()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="chatgpt-4o-latest",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ]
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with TypeGPT
+
+ ```python
+ from webscout.client import TypeGPT
+
+ # Initialize the client
+ client = TypeGPT()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="chatgpt-4o-latest",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print() # Add a newline at the end
+ ```
+
+ ### Basic Usage with SciraChat
+
+ ```python
+ from webscout.client import SciraChat
+
+ # Initialize the client
+ client = SciraChat()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="scira-default",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ]
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with SciraChat
+
+ ```python
+ from webscout.client import SciraChat
+
+ # Initialize the client
+ client = SciraChat()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="scira-default",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print() # Add a newline at the end
+ ```
+
+ ### Basic Usage with FreeAIChat
+
+ ```python
+ from webscout.client import FreeAIChat
+
+ # Initialize the client
+ client = FreeAIChat()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="GPT 4o",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ]
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with FreeAIChat
+
+ ```python
+ from webscout.client import FreeAIChat
+
+ # Initialize the client
+ client = FreeAIChat()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="GPT 4o",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print() # Add a newline at the end
+ ```
+
+ ### Basic Usage with LLMChatCo
+
+ ```python
+ from webscout.client import LLMChatCo
+
+ # Initialize the client
+ client = LLMChatCo()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="gemini-flash-2.0",  # Default model
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ],
+     temperature=0.7
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with LLMChatCo
+
+ ```python
+ from webscout.client import LLMChatCo
+
+ # Initialize the client
+ client = LLMChatCo()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="gemini-flash-2.0",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print() # Add a newline at the end
+ ```
+ ### Basic Usage with YEPCHAT
+
+ ```python
+ from webscout.client import YEPCHAT
+
+ # Initialize the client
+ client = YEPCHAT()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="DeepSeek-R1-Distill-Qwen-32B",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ],
+     temperature=0.7
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with YEPCHAT
+
+ ```python
+ from webscout.client import YEPCHAT
+
+ # Initialize the client
+ client = YEPCHAT()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="Mixtral-8x7B-Instruct-v0.1",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print() # Add a newline at the end
+ ```
+
+ ### Basic Usage with SonusAI
+
+ ```python
+ from webscout.client import SonusAI
+
+ # Initialize the client
+ client = SonusAI()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="pro",  # Choose from 'pro', 'air', or 'mini'
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ],
+     reasoning=True  # Optional: Enable reasoning mode
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with SonusAI
+
+ ```python
+ from webscout.client import SonusAI
+
+ # Initialize the client
+ client = SonusAI(timeout=60)
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="air",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print() # Add a newline at the end
+ ```
+
+ ### Basic Usage with ExaChat
+
+ ```python
+ from webscout.client import ExaChat
+
+ # Initialize the client
+ client = ExaChat()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="exaanswer",  # Choose from many available models
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ]
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Using Different ExaChat Providers
+
+ ```python
+ from webscout.client import ExaChat
+
+ # Initialize the client
+ client = ExaChat(timeout=60)
+
+ # Use a Gemini model
+ gemini_response = client.chat.completions.create(
+     model="gemini-2.0-flash",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Explain quantum computing in simple terms."}
+     ]
+ )
+
+ # Use a Groq model
+ groq_response = client.chat.completions.create(
+     model="llama-3.1-8b-instant",
+     messages=[
+         {"role": "user", "content": "Tell me about Python programming."}
+     ]
+ )
+
+ # Print both responses
+ print(gemini_response.choices[0].message.content)
+ print(groq_response.choices[0].message.content)
+ ```
+
+ ### Streaming with Netwrck
+
+ ```python
+ from webscout.client import Netwrck
+
+ # Initialize the client
+ client = Netwrck(timeout=60)
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="openai/gpt-4o-mini",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print() # Add a newline at the end
+ ```
+
+ ### Basic Usage with StandardInput
+
+ ```python
+ from webscout.client import StandardInput
+
+ # Initialize the client
+ client = StandardInput()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="standard-quick",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ]
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with StandardInput
+
+ ```python
+ from webscout.client import StandardInput
+
+ # Initialize the client
+ client = StandardInput()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="standard-reasoning",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Count from 1 to 5."}
+     ],
+     stream=True,
+     enable_reasoning=True  # Enable reasoning capabilities
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print() # Add a newline at the end
+ ```
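+
+ ### Collecting a Streamed Response
+
+ Because the streaming pattern above is identical for every provider, it can be factored into a small helper. The sketch below is illustrative only (`collect_stream` is a hypothetical helper, not part of webscout) and assumes nothing beyond the chunk shape used in the examples above.
+
+ ```python
+ from webscout.client import TypeGPT
+
+ def collect_stream(stream) -> str:
+     """Concatenate the delta fragments of a streamed completion (hypothetical helper)."""
+     parts = []
+     for chunk in stream:
+         delta = chunk.choices[0].delta.content
+         if delta:
+             parts.append(delta)
+     return "".join(parts)
+
+ client = TypeGPT()
+ stream = client.chat.completions.create(
+     model="chatgpt-4o-latest",
+     messages=[{"role": "user", "content": "Write a short poem about programming."}],
+     stream=True
+ )
+ print(collect_stream(stream))
+ ```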
+
+ ## 🔄 Response Format
+
+ All providers return responses that mimic the OpenAI API structure, ensuring compatibility with tools built for OpenAI.
+
+ ### 📝 Non-streaming Response
+
+ ```json
+ {
+   "id": "chatcmpl-123abc",
+   "object": "chat.completion",
+   "created": 1677858242,
+   "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
+   "usage": {
+     "prompt_tokens": 13,
+     "completion_tokens": 7,
+     "total_tokens": 20
+   },
+   "choices": [
+     {
+       "message": {
+         "role": "assistant",
+         "content": "This is a response from the model."
+       },
+       "finish_reason": "stop",
+       "index": 0
+     }
+   ]
+ }
+ ```
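+
+ Since the returned objects mirror this JSON, their fields are read with the same attribute access used in the examples above. A brief sketch (it assumes the provider actually populates `usage`; free backends may only estimate or omit token counts):
+
+ ```python
+ # Sketch: inspecting a non-streaming response object.
+ # `response` is assumed to come from any of the create() calls above.
+ print(response.id)                        # e.g. "chatcmpl-123abc"
+ print(response.choices[0].finish_reason)  # e.g. "stop"
+ if response.usage:                        # may be absent or estimated on some providers
+     print(response.usage.total_tokens)    # e.g. 20
+ ```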
+
+ ### 📱 Streaming Response Chunks
+
+ ```json
+ {
+   "id": "chatcmpl-123abc",
+   "object": "chat.completion.chunk",
+   "created": 1677858242,
+   "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
+   "choices": [
+     {
+       "delta": {
+         "content": "This "
+       },
+       "finish_reason": null,
+       "index": 0
+     }
+   ]
+ }
+ ```
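+
+ As in OpenAI's chunk format, `finish_reason` stays `null` while content is flowing and carries a value such as `"stop"` once generation ends. A hedged sketch that uses it to detect the end of a stream (whether the final chunk sets it is provider-dependent):
+
+ ```python
+ # Sketch: detecting the end of a stream via finish_reason.
+ # `stream` is assumed to come from a create(..., stream=True) call as above;
+ # some providers may not set finish_reason on their final chunk.
+ for chunk in stream:
+     choice = chunk.choices[0]
+     if choice.delta.content:
+         print(choice.delta.content, end="", flush=True)
+     if choice.finish_reason is not None:
+         print(f"\n[finished: {choice.finish_reason}]")
+ ```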
+
+ ## 🧩 Architecture
+
+ The OpenAI-compatible providers are built on a modular architecture:
+
+ - `base.py`: Contains abstract base classes that define the OpenAI-compatible interface
+ - `utils.py`: Provides data structures that mimic OpenAI's response format
+ - Provider-specific implementations (e.g., `deepinfra.py`): Implement the abstract interfaces for specific providers
+
+ This architecture makes it easy to add new providers while maintaining a consistent interface, as the sketch below illustrates.
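+
+ For orientation, here is a minimal sketch of what a new provider module might look like. Only the `webscout/Provider/OPENAI` directory, `base.py`, and the `OpenAICompatibleProvider` name are confirmed by this README; the import path and class body below are assumptions, so treat an existing provider such as `deepinfra.py` as the authoritative template.
+
+ ```python
+ # Hypothetical skeleton for webscout/Provider/OPENAI/myprovider.py.
+ # The import path and attributes are inferred, not webscout's literal API.
+ from webscout.Provider.OPENAI.base import OpenAICompatibleProvider  # assumed module path
+
+ class MyProvider(OpenAICompatibleProvider):
+     """Adapter that maps chat.completions.create() calls onto the
+     upstream service's HTTP API (sketch only)."""
+
+     AVAILABLE_MODELS = ["my-model-small", "my-model-large"]  # hypothetical model ids
+ ```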
+
+ ## 📝 Notes
+
+ - Some providers may require API keys for full functionality
+ - Not all OpenAI features are supported by all providers
+ - Response formats are standardized to match OpenAI's format, but the underlying content depends on the specific provider and model
+
+ ## 🤝 Contributing
+
+ Want to add a new OpenAI-compatible provider? Follow these steps:
+
+ 1. Create a new file in the `webscout/Provider/OPENAI` directory
+ 2. Implement the `OpenAICompatibleProvider` interface
+ 3. Add appropriate tests
+ 4. Update this README with information about the new provider
+
+ ## 📚 Related Documentation
+
+ - [OpenAI API Reference](https://platform.openai.com/docs/api-reference)
+ - [DeepInfra Documentation](https://deepinfra.com/docs)
+ - [Glider.so Website](https://glider.so/)
+ - [ChatGPT Clone Website](https://chatgpt-clone-ten-nu.vercel.app/)
+ - [X0GPT Website](https://x0-gpt.devwtf.in/)
+ - [WiseCat Website](https://wise-cat-groq.vercel.app/)
+ - [Venice AI Website](https://venice.ai/)
+ - [ExaAI Website](https://o3minichat.exa.ai/)
+ - [TypeGPT Website](https://chat.typegpt.net/)
+ - [SciraChat Website](https://scira.ai/)
+ - [FreeAIChat Website](https://freeaichatplayground.com/)
+ - [LLMChatCo Website](https://llmchat.co/)
+ - [Yep.com Website](https://yep.com/)
+ - [HeckAI Website](https://heck.ai/)
+ - [SonusAI Website](https://chat.sonus.ai/)
+ - [ExaChat Website](https://exa-chat.vercel.app/)
+ - [Netwrck Website](https://netwrck.com/)
+ - [StandardInput Website](https://chat.standard-input.com/)
+
+ <div align="center">
+   <a href="https://t.me/PyscoutAI"><img alt="Telegram Group" src="https://img.shields.io/badge/Telegram%20Group-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white"></a>
+   <a href="https://buymeacoffee.com/oevortex"><img alt="Buy Me A Coffee" src="https://img.shields.io/badge/Buy%20Me%20A%20Coffee-FFDD00?style=for-the-badge&logo=buymeacoffee&logoColor=black"></a>
+ </div>