webscout-8.3.7-py3-none-any.whl → webscout-2025.10.11-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout has been flagged as possibly problematic.

Files changed (273)
  1. webscout/AIauto.py +250 -250
  2. webscout/AIbase.py +379 -379
  3. webscout/AIutel.py +60 -60
  4. webscout/Bard.py +1012 -1012
  5. webscout/Bing_search.py +417 -417
  6. webscout/DWEBS.py +529 -529
  7. webscout/Extra/Act.md +309 -309
  8. webscout/Extra/GitToolkit/__init__.py +10 -10
  9. webscout/Extra/GitToolkit/gitapi/README.md +110 -110
  10. webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
  11. webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
  12. webscout/Extra/GitToolkit/gitapi/user.py +96 -96
  13. webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
  14. webscout/Extra/YTToolkit/README.md +375 -375
  15. webscout/Extra/YTToolkit/YTdownloader.py +956 -956
  16. webscout/Extra/YTToolkit/__init__.py +2 -2
  17. webscout/Extra/YTToolkit/transcriber.py +475 -475
  18. webscout/Extra/YTToolkit/ytapi/README.md +44 -44
  19. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
  20. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  21. webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
  22. webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
  23. webscout/Extra/YTToolkit/ytapi/https.py +88 -88
  24. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
  25. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  26. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  27. webscout/Extra/YTToolkit/ytapi/query.py +39 -39
  28. webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
  29. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  30. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  31. webscout/Extra/autocoder/__init__.py +9 -9
  32. webscout/Extra/autocoder/autocoder.py +1105 -1105
  33. webscout/Extra/autocoder/autocoder_utiles.py +332 -332
  34. webscout/Extra/gguf.md +429 -429
  35. webscout/Extra/gguf.py +1213 -1213
  36. webscout/Extra/tempmail/README.md +487 -487
  37. webscout/Extra/tempmail/__init__.py +27 -27
  38. webscout/Extra/tempmail/async_utils.py +140 -140
  39. webscout/Extra/tempmail/base.py +160 -160
  40. webscout/Extra/tempmail/cli.py +186 -186
  41. webscout/Extra/tempmail/emailnator.py +84 -84
  42. webscout/Extra/tempmail/mail_tm.py +360 -360
  43. webscout/Extra/tempmail/temp_mail_io.py +291 -291
  44. webscout/Extra/weather.md +281 -281
  45. webscout/Extra/weather.py +193 -193
  46. webscout/Litlogger/README.md +10 -10
  47. webscout/Litlogger/__init__.py +15 -15
  48. webscout/Litlogger/formats.py +13 -13
  49. webscout/Litlogger/handlers.py +121 -121
  50. webscout/Litlogger/levels.py +13 -13
  51. webscout/Litlogger/logger.py +134 -134
  52. webscout/Provider/AISEARCH/Perplexity.py +332 -332
  53. webscout/Provider/AISEARCH/README.md +279 -279
  54. webscout/Provider/AISEARCH/__init__.py +16 -1
  55. webscout/Provider/AISEARCH/felo_search.py +206 -206
  56. webscout/Provider/AISEARCH/genspark_search.py +323 -323
  57. webscout/Provider/AISEARCH/hika_search.py +185 -185
  58. webscout/Provider/AISEARCH/iask_search.py +410 -410
  59. webscout/Provider/AISEARCH/monica_search.py +219 -219
  60. webscout/Provider/AISEARCH/scira_search.py +316 -316
  61. webscout/Provider/AISEARCH/stellar_search.py +177 -177
  62. webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
  63. webscout/Provider/Aitopia.py +314 -314
  64. webscout/Provider/Apriel.py +306 -0
  65. webscout/Provider/ChatGPTClone.py +236 -236
  66. webscout/Provider/ChatSandbox.py +343 -343
  67. webscout/Provider/Cloudflare.py +324 -324
  68. webscout/Provider/Cohere.py +208 -208
  69. webscout/Provider/Deepinfra.py +370 -366
  70. webscout/Provider/ExaAI.py +260 -260
  71. webscout/Provider/ExaChat.py +308 -308
  72. webscout/Provider/Flowith.py +221 -221
  73. webscout/Provider/GMI.py +293 -0
  74. webscout/Provider/Gemini.py +164 -164
  75. webscout/Provider/GeminiProxy.py +167 -167
  76. webscout/Provider/GithubChat.py +371 -372
  77. webscout/Provider/Groq.py +800 -800
  78. webscout/Provider/HeckAI.py +383 -383
  79. webscout/Provider/Jadve.py +282 -282
  80. webscout/Provider/K2Think.py +307 -307
  81. webscout/Provider/Koboldai.py +205 -205
  82. webscout/Provider/LambdaChat.py +423 -423
  83. webscout/Provider/Nemotron.py +244 -244
  84. webscout/Provider/Netwrck.py +248 -248
  85. webscout/Provider/OLLAMA.py +395 -395
  86. webscout/Provider/OPENAI/Cloudflare.py +393 -393
  87. webscout/Provider/OPENAI/FalconH1.py +451 -451
  88. webscout/Provider/OPENAI/FreeGemini.py +296 -296
  89. webscout/Provider/OPENAI/K2Think.py +431 -431
  90. webscout/Provider/OPENAI/NEMOTRON.py +240 -240
  91. webscout/Provider/OPENAI/PI.py +427 -427
  92. webscout/Provider/OPENAI/README.md +959 -959
  93. webscout/Provider/OPENAI/TogetherAI.py +345 -345
  94. webscout/Provider/OPENAI/TwoAI.py +465 -465
  95. webscout/Provider/OPENAI/__init__.py +33 -18
  96. webscout/Provider/OPENAI/base.py +248 -248
  97. webscout/Provider/OPENAI/chatglm.py +528 -0
  98. webscout/Provider/OPENAI/chatgpt.py +592 -592
  99. webscout/Provider/OPENAI/chatgptclone.py +521 -521
  100. webscout/Provider/OPENAI/chatsandbox.py +202 -202
  101. webscout/Provider/OPENAI/deepinfra.py +318 -314
  102. webscout/Provider/OPENAI/e2b.py +1665 -1665
  103. webscout/Provider/OPENAI/exaai.py +420 -420
  104. webscout/Provider/OPENAI/exachat.py +452 -452
  105. webscout/Provider/OPENAI/friendli.py +232 -232
  106. webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
  107. webscout/Provider/OPENAI/groq.py +364 -364
  108. webscout/Provider/OPENAI/heckai.py +314 -314
  109. webscout/Provider/OPENAI/llmchatco.py +337 -337
  110. webscout/Provider/OPENAI/netwrck.py +355 -355
  111. webscout/Provider/OPENAI/oivscode.py +290 -290
  112. webscout/Provider/OPENAI/opkfc.py +518 -518
  113. webscout/Provider/OPENAI/pydantic_imports.py +1 -1
  114. webscout/Provider/OPENAI/scirachat.py +535 -535
  115. webscout/Provider/OPENAI/sonus.py +308 -308
  116. webscout/Provider/OPENAI/standardinput.py +442 -442
  117. webscout/Provider/OPENAI/textpollinations.py +340 -340
  118. webscout/Provider/OPENAI/toolbaz.py +419 -416
  119. webscout/Provider/OPENAI/typefully.py +362 -362
  120. webscout/Provider/OPENAI/utils.py +295 -295
  121. webscout/Provider/OPENAI/venice.py +436 -436
  122. webscout/Provider/OPENAI/wisecat.py +387 -387
  123. webscout/Provider/OPENAI/writecream.py +166 -166
  124. webscout/Provider/OPENAI/x0gpt.py +378 -378
  125. webscout/Provider/OPENAI/yep.py +389 -389
  126. webscout/Provider/OpenGPT.py +230 -230
  127. webscout/Provider/Openai.py +243 -243
  128. webscout/Provider/PI.py +405 -405
  129. webscout/Provider/Perplexitylabs.py +430 -430
  130. webscout/Provider/QwenLM.py +272 -272
  131. webscout/Provider/STT/__init__.py +16 -1
  132. webscout/Provider/Sambanova.py +257 -257
  133. webscout/Provider/StandardInput.py +309 -309
  134. webscout/Provider/TTI/README.md +82 -82
  135. webscout/Provider/TTI/__init__.py +33 -18
  136. webscout/Provider/TTI/aiarta.py +413 -413
  137. webscout/Provider/TTI/base.py +136 -136
  138. webscout/Provider/TTI/bing.py +243 -243
  139. webscout/Provider/TTI/gpt1image.py +149 -149
  140. webscout/Provider/TTI/imagen.py +196 -196
  141. webscout/Provider/TTI/infip.py +211 -211
  142. webscout/Provider/TTI/magicstudio.py +232 -232
  143. webscout/Provider/TTI/monochat.py +219 -219
  144. webscout/Provider/TTI/piclumen.py +214 -214
  145. webscout/Provider/TTI/pixelmuse.py +232 -232
  146. webscout/Provider/TTI/pollinations.py +232 -232
  147. webscout/Provider/TTI/together.py +288 -288
  148. webscout/Provider/TTI/utils.py +12 -12
  149. webscout/Provider/TTI/venice.py +367 -367
  150. webscout/Provider/TTS/README.md +192 -192
  151. webscout/Provider/TTS/__init__.py +33 -18
  152. webscout/Provider/TTS/parler.py +110 -110
  153. webscout/Provider/TTS/streamElements.py +333 -333
  154. webscout/Provider/TTS/utils.py +280 -280
  155. webscout/Provider/TeachAnything.py +237 -237
  156. webscout/Provider/TextPollinationsAI.py +310 -310
  157. webscout/Provider/TogetherAI.py +356 -356
  158. webscout/Provider/TwoAI.py +312 -312
  159. webscout/Provider/TypliAI.py +311 -311
  160. webscout/Provider/UNFINISHED/ChatHub.py +208 -208
  161. webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
  162. webscout/Provider/UNFINISHED/GizAI.py +294 -294
  163. webscout/Provider/UNFINISHED/Marcus.py +198 -198
  164. webscout/Provider/UNFINISHED/Qodo.py +477 -477
  165. webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
  166. webscout/Provider/UNFINISHED/XenAI.py +324 -324
  167. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  168. webscout/Provider/UNFINISHED/liner.py +334 -0
  169. webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
  170. webscout/Provider/UNFINISHED/puterjs.py +634 -634
  171. webscout/Provider/UNFINISHED/samurai.py +223 -223
  172. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  173. webscout/Provider/Venice.py +250 -250
  174. webscout/Provider/VercelAI.py +256 -256
  175. webscout/Provider/WiseCat.py +231 -231
  176. webscout/Provider/WrDoChat.py +366 -366
  177. webscout/Provider/__init__.py +33 -18
  178. webscout/Provider/ai4chat.py +174 -174
  179. webscout/Provider/akashgpt.py +331 -331
  180. webscout/Provider/cerebras.py +446 -446
  181. webscout/Provider/chatglm.py +394 -301
  182. webscout/Provider/cleeai.py +211 -211
  183. webscout/Provider/elmo.py +282 -282
  184. webscout/Provider/geminiapi.py +208 -208
  185. webscout/Provider/granite.py +261 -261
  186. webscout/Provider/hermes.py +263 -263
  187. webscout/Provider/julius.py +223 -223
  188. webscout/Provider/learnfastai.py +309 -309
  189. webscout/Provider/llama3mitril.py +214 -214
  190. webscout/Provider/llmchat.py +243 -243
  191. webscout/Provider/llmchatco.py +290 -290
  192. webscout/Provider/meta.py +801 -801
  193. webscout/Provider/oivscode.py +309 -309
  194. webscout/Provider/scira_chat.py +383 -383
  195. webscout/Provider/searchchat.py +292 -292
  196. webscout/Provider/sonus.py +258 -258
  197. webscout/Provider/toolbaz.py +370 -367
  198. webscout/Provider/turboseek.py +273 -273
  199. webscout/Provider/typefully.py +207 -207
  200. webscout/Provider/yep.py +372 -372
  201. webscout/__init__.py +30 -31
  202. webscout/__main__.py +5 -5
  203. webscout/auth/api_key_manager.py +189 -189
  204. webscout/auth/config.py +175 -175
  205. webscout/auth/models.py +185 -185
  206. webscout/auth/routes.py +664 -664
  207. webscout/auth/simple_logger.py +236 -236
  208. webscout/cli.py +523 -523
  209. webscout/conversation.py +438 -438
  210. webscout/exceptions.py +361 -361
  211. webscout/litagent/Readme.md +298 -298
  212. webscout/litagent/__init__.py +28 -28
  213. webscout/litagent/agent.py +581 -581
  214. webscout/litagent/constants.py +59 -59
  215. webscout/litprinter/__init__.py +58 -58
  216. webscout/models.py +181 -181
  217. webscout/optimizers.py +419 -419
  218. webscout/prompt_manager.py +288 -288
  219. webscout/sanitize.py +1078 -1078
  220. webscout/scout/README.md +401 -401
  221. webscout/scout/__init__.py +8 -8
  222. webscout/scout/core/__init__.py +6 -6
  223. webscout/scout/core/crawler.py +297 -297
  224. webscout/scout/core/scout.py +706 -706
  225. webscout/scout/core/search_result.py +95 -95
  226. webscout/scout/core/text_analyzer.py +62 -62
  227. webscout/scout/core/text_utils.py +277 -277
  228. webscout/scout/core/web_analyzer.py +51 -51
  229. webscout/scout/element.py +599 -599
  230. webscout/scout/parsers/__init__.py +69 -69
  231. webscout/scout/parsers/html5lib_parser.py +172 -172
  232. webscout/scout/parsers/html_parser.py +236 -236
  233. webscout/scout/parsers/lxml_parser.py +178 -178
  234. webscout/scout/utils.py +37 -37
  235. webscout/swiftcli/Readme.md +323 -323
  236. webscout/swiftcli/__init__.py +95 -95
  237. webscout/swiftcli/core/__init__.py +7 -7
  238. webscout/swiftcli/core/cli.py +308 -308
  239. webscout/swiftcli/core/context.py +104 -104
  240. webscout/swiftcli/core/group.py +241 -241
  241. webscout/swiftcli/decorators/__init__.py +28 -28
  242. webscout/swiftcli/decorators/command.py +221 -221
  243. webscout/swiftcli/decorators/options.py +220 -220
  244. webscout/swiftcli/decorators/output.py +302 -302
  245. webscout/swiftcli/exceptions.py +21 -21
  246. webscout/swiftcli/plugins/__init__.py +9 -9
  247. webscout/swiftcli/plugins/base.py +135 -135
  248. webscout/swiftcli/plugins/manager.py +269 -269
  249. webscout/swiftcli/utils/__init__.py +59 -59
  250. webscout/swiftcli/utils/formatting.py +252 -252
  251. webscout/swiftcli/utils/parsing.py +267 -267
  252. webscout/update_checker.py +117 -117
  253. webscout/version.py +1 -1
  254. webscout/webscout_search.py +1183 -1183
  255. webscout/webscout_search_async.py +649 -649
  256. webscout/yep_search.py +346 -346
  257. webscout/zeroart/README.md +89 -89
  258. webscout/zeroart/__init__.py +134 -134
  259. webscout/zeroart/base.py +66 -66
  260. webscout/zeroart/effects.py +100 -100
  261. webscout/zeroart/fonts.py +1238 -1238
  262. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -937
  263. webscout-2025.10.11.dist-info/RECORD +300 -0
  264. webscout/Provider/AISEARCH/DeepFind.py +0 -254
  265. webscout/Provider/OPENAI/Qwen3.py +0 -303
  266. webscout/Provider/OPENAI/qodo.py +0 -630
  267. webscout/Provider/OPENAI/xenai.py +0 -514
  268. webscout/tempid.py +0 -134
  269. webscout-8.3.7.dist-info/RECORD +0 -301
  270. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
  271. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
  272. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
  273. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/README.md
@@ -1,959 +1,959 @@
<div align="center">
<a href="https://github.com/OEvortex/Webscout">
<img src="https://img.shields.io/badge/WebScout-OpenAI%20Compatible%20Providers-4285F4?style=for-the-badge&logo=openai&logoColor=white" alt="WebScout OpenAI Compatible Providers">
</a>
<br/>
<h1>WebScout OpenAI-Compatible Providers</h1>
<p><strong>Seamlessly integrate with various AI providers using OpenAI-compatible interfaces</strong></p>

<p>
<img src="https://img.shields.io/badge/Python-3.7+-3776AB?style=flat-square&logo=python&logoColor=white" alt="Python 3.7+">
<img src="https://img.shields.io/badge/License-MIT-green?style=flat-square" alt="License: MIT">
<img src="https://img.shields.io/badge/PRs-Welcome-brightgreen?style=flat-square" alt="PRs Welcome">
</p>

<p>
Access multiple AI providers through a standardized OpenAI-compatible interface, making it easy to switch between providers without changing your code.
</p>
</div>

## 🚀 Overview

The WebScout OpenAI-Compatible Providers module offers a standardized way to interact with various AI providers using the familiar OpenAI API structure. This makes it easy to:

- Use the same code structure across different AI providers
- Switch between providers without major code changes (see the sketch below)
- Leverage the OpenAI ecosystem of tools and libraries with alternative AI providers

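
A quick way to see the "same code, different provider" idea in practice is to drive two providers with one loop. This is a minimal sketch that uses only the classes and model names appearing in the examples later in this README:

```python
# Provider-swap sketch: DeepInfra and Glider expose the same
# chat.completions.create(...) surface, so only the class and model change.
from webscout.client import DeepInfra, Glider

for client_cls, model in [
    (DeepInfra, "meta-llama/Meta-Llama-3.1-8B-Instruct"),
    (Glider, "chat-llama-3-1-70b"),
]:
    client = client_cls()
    response = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": "Say hello in one sentence."}],
    )
    print(f"{client_cls.__name__}: {response.choices[0].message.content}")
```
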
## ⚙️ Available Providers

Currently, the following providers are implemented with OpenAI-compatible interfaces:

- DeepInfra
- Glider
- ChatGPTClone
- X0GPT
- WiseCat
- Venice
- ExaAI
- TypeGPT
- SciraChat
- LLMChatCo
- YEPCHAT
- HeckAI
- SonusAI
- ExaChat
- Netwrck
- StandardInput
- Writecream
- toolbaz
- UncovrAI
- OPKFC
- TextPollinations
- E2B
- MultiChatAI
- AI4Chat
- MCPCore
- TypefullyAI
- Flowith
- ChatSandbox
- Cloudflare
- NEMOTRON
- BLACKBOXAI
- Copilot
- TwoAI
- oivscode
- Qwen3
- TogetherAI
- PiAI
- FalconH1
- XenAI
- GeminiProxy
- MonoChat
- Friendli
- MiniMax
- QodoAI
- Kimi
- GptOss

## 💻 Usage Examples

Here are examples of how to use the OpenAI-compatible providers in your code.

### Basic Usage with DeepInfra

```python
from webscout.client import DeepInfra

# Initialize the client
client = DeepInfra()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="meta-llama/Meta-Llama-3.1-8B-Instruct",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ],
    temperature=0.7,
    max_tokens=500
)

# Print the response
print(response.choices[0].message.content)
```

### Basic Usage with Glider

```python
from webscout.client import Glider

# Initialize the client
client = Glider()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="chat-llama-3-1-70b",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ],
    max_tokens=500
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming Responses (Example with DeepInfra)

```python
from webscout.client import DeepInfra

# Initialize the client
client = DeepInfra()

# Create a streaming completion
stream = client.chat.completions.create(
    model="meta-llama/Meta-Llama-3.1-8B-Instruct",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True,
    temperature=0.7
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Streaming with Glider

```python
from webscout.client import Glider

# Initialize the client
client = Glider()

# Create a streaming completion
stream = client.chat.completions.create(
    model="chat-llama-3-1-70b",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Basic Usage with ChatGPTClone

```python
from webscout.client import ChatGPTClone

# Initialize the client
client = ChatGPTClone()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="gpt-4",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ],
    temperature=0.7
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with ChatGPTClone

```python
from webscout.client import ChatGPTClone

# Initialize the client
client = ChatGPTClone()

# Create a streaming completion
stream = client.chat.completions.create(
    model="gpt-4",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Basic Usage with X0GPT

```python
from webscout.client import X0GPT

# Initialize the client
client = X0GPT()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="gpt-4",  # Model name doesn't matter for X0GPT
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with X0GPT

```python
from webscout.client import X0GPT

# Initialize the client
client = X0GPT()

# Create a streaming completion
stream = client.chat.completions.create(
    model="gpt-4",  # Model name doesn't matter for X0GPT
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Basic Usage with WiseCat

```python
from webscout.client import WiseCat

# Initialize the client
client = WiseCat()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="chat-model-small",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with WiseCat

```python
from webscout.client import WiseCat

# Initialize the client
client = WiseCat()

# Create a streaming completion
stream = client.chat.completions.create(
    model="chat-model-small",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Basic Usage with Venice

```python
from webscout.client import Venice

# Initialize the client
client = Venice(temperature=0.7, top_p=0.9)

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="mistral-31-24b",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with Venice

```python
from webscout.client import Venice

# Initialize the client
client = Venice()

# Create a streaming completion
stream = client.chat.completions.create(
    model="mistral-31-24b",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Basic Usage with ExaAI

```python
from webscout.client import ExaAI

# Initialize the client
client = ExaAI()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="O3-Mini",
    messages=[
        # Note: ExaAI does not support system messages (they will be removed)
        {"role": "user", "content": "Hello!"},
        {"role": "assistant", "content": "Hi there! How can I help you today?"},
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Basic Usage with HeckAI

```python
from webscout.client import HeckAI

# Initialize the client
client = HeckAI(language="English")

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="google/gemini-2.0-flash-001",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with HeckAI

```python
from webscout.client import HeckAI

# Initialize the client
client = HeckAI()

# Create a streaming completion
stream = client.chat.completions.create(
    model="google/gemini-2.0-flash-001",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Streaming with ExaAI

```python
from webscout.client import ExaAI

# Initialize the client
client = ExaAI()

# Create a streaming completion
stream = client.chat.completions.create(
    model="O3-Mini",
    messages=[
        # Note: ExaAI does not support system messages (they will be removed)
        {"role": "user", "content": "Hello!"},
        {"role": "assistant", "content": "Hi there! How can I help you today?"},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Basic Usage with TypeGPT

```python
from webscout.client import TypeGPT

# Initialize the client
client = TypeGPT()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="chatgpt-4o-latest",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with TypeGPT

```python
from webscout.client import TypeGPT

# Initialize the client
client = TypeGPT()

# Create a streaming completion
stream = client.chat.completions.create(
    model="chatgpt-4o-latest",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Basic Usage with SciraChat

```python
from webscout.client import SciraChat

# Initialize the client
client = SciraChat()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="scira-default",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with SciraChat

```python
from webscout.client import SciraChat

# Initialize the client
client = SciraChat()

# Create a streaming completion
stream = client.chat.completions.create(
    model="scira-default",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Basic Usage with FreeAIChat

```python
from webscout.client import FreeAIChat

# Initialize the client
client = FreeAIChat()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="GPT 4o",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with FreeAIChat

```python
from webscout.client import FreeAIChat

# Initialize the client
client = FreeAIChat()

# Create a streaming completion
stream = client.chat.completions.create(
    model="GPT 4o",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Basic Usage with LLMChatCo

```python
from webscout.client import LLMChatCo

# Initialize the client
client = LLMChatCo()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="gemini-flash-2.0",  # Default model
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ],
    temperature=0.7
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with LLMChatCo

```python
from webscout.client import LLMChatCo

# Initialize the client
client = LLMChatCo()

# Create a streaming completion
stream = client.chat.completions.create(
    model="gemini-flash-2.0",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Basic Usage with YEPCHAT

```python
from webscout.client import YEPCHAT

# Initialize the client
client = YEPCHAT()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="DeepSeek-R1-Distill-Qwen-32B",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ],
    temperature=0.7
)

# Print the response
print(response.choices[0].message.content)
```

### Basic Usage with SonusAI

```python
from webscout.client import SonusAI

# Initialize the client
client = SonusAI()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="pro",  # Choose from 'pro', 'air', or 'mini'
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ],
    reasoning=True  # Optional: Enable reasoning mode
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with YEPCHAT

```python
from webscout.client import YEPCHAT

# Initialize the client
client = YEPCHAT()

# Create a streaming completion
stream = client.chat.completions.create(
    model="Mixtral-8x7B-Instruct-v0.1",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Streaming with SonusAI

```python
from webscout.client import SonusAI

# Initialize the client
client = SonusAI(timeout=60)

# Create a streaming completion
stream = client.chat.completions.create(
    model="air",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Basic Usage with ExaChat

```python
from webscout.client import ExaChat

# Initialize the client
client = ExaChat()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="exaanswer",  # Choose from many available models
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Using Different ExaChat Providers

```python
from webscout.client import ExaChat

# Initialize the client
client = ExaChat(timeout=60)

# Use a Gemini model
gemini_response = client.chat.completions.create(
    model="gemini-2.0-flash",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Explain quantum computing in simple terms."}
    ]
)

# Use a Groq model
groq_response = client.chat.completions.create(
    model="llama-3.1-8b-instant",
    messages=[
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print the responses
print(gemini_response.choices[0].message.content)
print(groq_response.choices[0].message.content)
```

### Streaming with Netwrck

```python
from webscout.client import Netwrck

# Initialize the client
client = Netwrck(timeout=60)

# Create a streaming completion
stream = client.chat.completions.create(
    model="openai/gpt-4o-mini",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Basic Usage with StandardInput

```python
from webscout.client import StandardInput

# Initialize the client
client = StandardInput()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="standard-quick",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with StandardInput

```python
from webscout.client import StandardInput

# Initialize the client
client = StandardInput()

# Create a streaming completion
stream = client.chat.completions.create(
    model="standard-reasoning",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Count from 1 to 5."}
    ],
    stream=True,
    enable_reasoning=True  # Enable reasoning capabilities
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

## 🔄 Response Format

All providers return responses that mimic the OpenAI API structure, ensuring compatibility with tools built for OpenAI.

### 📝 Non-streaming Response

```json
{
  "id": "chatcmpl-123abc",
  "object": "chat.completion",
  "created": 1677858242,
  "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
  "usage": {
    "prompt_tokens": 13,
    "completion_tokens": 7,
    "total_tokens": 20
  },
  "choices": [
    {
      "message": {
        "role": "assistant",
        "content": "This is a response from the model."
      },
      "finish_reason": "stop",
      "index": 0
    }
  ]
}
```

### 📱 Streaming Response Chunks

```json
{
  "id": "chatcmpl-123abc",
  "object": "chat.completion.chunk",
  "created": 1677858242,
  "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
  "choices": [
    {
      "delta": {
        "content": "This "
      },
      "finish_reason": null,
      "index": 0
    }
  ]
}
```
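
Because streamed chunks carry deltas rather than full messages, a common pattern is to accumulate them into the complete text. The sketch below assumes the Python response objects mirror the JSON fields shown above (`choices[0].delta.content` and `finish_reason`); it reuses the DeepInfra client from the earlier examples:

```python
from webscout.client import DeepInfra

client = DeepInfra()
stream = client.chat.completions.create(
    model="meta-llama/Meta-Llama-3.1-8B-Instruct",
    messages=[{"role": "user", "content": "Name three Python keywords."}],
    stream=True,
)

# Accumulate the streamed deltas into one string
parts = []
for chunk in stream:
    choice = chunk.choices[0]
    if choice.delta.content:            # delta.content may be empty or None
        parts.append(choice.delta.content)
    if choice.finish_reason == "stop":  # final chunk, as in the JSON above
        break

print("".join(parts))
```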

## 🧩 Architecture

The OpenAI-compatible providers are built on a modular architecture:

- `base.py`: Contains abstract base classes that define the OpenAI-compatible interface
- `utils.py`: Provides data structures that mimic OpenAI's response format
- Provider-specific implementations (e.g., `deepinfra.py`): Implement the abstract interfaces for specific providers

This architecture makes it easy to add new providers while maintaining a consistent interface.

## 📝 Notes

- Some providers may require API keys for full functionality (see the sketch after this list)
- Not all OpenAI features are supported by all providers
- Response formats are standardized to match OpenAI's format, but the underlying content depends on the specific provider and model

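
For providers that accept credentials, configuration would look something like the sketch below. Note that `api_key` is an assumed parameter name used purely for illustration, not a documented option; check the constructor of the specific provider for its real arguments (the examples above show kwargs such as `timeout`, `temperature`, and `language`):

```python
from webscout.client import DeepInfra

# `api_key` is a hypothetical keyword shown for illustration only — consult
# the provider's constructor for the actual argument name it accepts.
client = DeepInfra(api_key="YOUR_API_KEY")
```
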

## 🤝 Contributing

Want to add a new OpenAI-compatible provider? Follow these steps (a skeleton for step 2 is sketched after the list):

1. Create a new file in the `webscout/Provider/OPENAI` directory
2. Implement the `OpenAICompatibleProvider` interface
3. Add appropriate tests
4. Update this README with information about the new provider

## 📚 Related Documentation

- [OpenAI API Reference](https://platform.openai.com/docs/api-reference)
- [DeepInfra Documentation](https://deepinfra.com/docs)
- [Glider.so Website](https://glider.so/)
- [ChatGPT Clone Website](https://chatgpt-clone-ten-nu.vercel.app/)
- [X0GPT Website](https://x0-gpt.devwtf.in/)
- [WiseCat Website](https://wise-cat-groq.vercel.app/)
- [Venice AI Website](https://venice.ai/)
- [ExaAI Website](https://o3minichat.exa.ai/)
- [TypeGPT Website](https://chat.typegpt.net/)
- [SciraChat Website](https://scira.ai/)
- [FreeAIChat Website](https://freeaichatplayground.com/)
- [LLMChatCo Website](https://llmchat.co/)
- [Yep.com Website](https://yep.com/)
- [HeckAI Website](https://heck.ai/)
- [SonusAI Website](https://chat.sonus.ai/)
- [ExaChat Website](https://exa-chat.vercel.app/)
- [Netwrck Website](https://netwrck.com/)
- [StandardInput Website](https://chat.standard-input.com/)

<div align="center">
<a href="https://t.me/PyscoutAI"><img alt="Telegram Group" src="https://img.shields.io/badge/Telegram%20Group-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white"></a>
<a href="https://buymeacoffee.com/oevortex"><img alt="Buy Me A Coffee" src="https://img.shields.io/badge/Buy%20Me%20A%20Coffee-FFDD00?style=for-the-badge&logo=buymeacoffee&logoColor=black"></a>
</div>
1
+ <div align="center">
2
+ <a href="https://github.com/OEvortex/Webscout">
3
+ <img src="https://img.shields.io/badge/WebScout-OpenAI%20Compatible%20Providers-4285F4?style=for-the-badge&logo=openai&logoColor=white" alt="WebScout OpenAI Compatible Providers">
4
+ </a>
5
+ <br/>
6
+ <h1>WebScout OpenAI-Compatible Providers</h1>
7
+ <p><strong>Seamlessly integrate with various AI providers using OpenAI-compatible interfaces</strong></p>
8
+
9
+ <p>
10
+ <img src="https://img.shields.io/badge/Python-3.7+-3776AB?style=flat-square&logo=python&logoColor=white" alt="Python 3.7+">
11
+ <img src="https://img.shields.io/badge/License-MIT-green?style=flat-square" alt="License: MIT">
12
+ <img src="https://img.shields.io/badge/PRs-Welcome-brightgreen?style=flat-square" alt="PRs Welcome">
13
+ </p>
14
+
15
+ <p>
16
+ Access multiple AI providers through a standardized OpenAI-compatible interface, making it easy to switch between providers without changing your code.
17
+ </p>
18
+ </div>
19
+
20
+ ## 🚀 Overview
21
+
22
+ The WebScout OpenAI-Compatible Providers module offers a standardized way to interact with various AI providers using the familiar OpenAI API structure. This makes it easy to:
23
+
24
+ - Use the same code structure across different AI providers
25
+ - Switch between providers without major code changes
26
+ - Leverage the OpenAI ecosystem of tools and libraries with alternative AI providers
27
+
28
+ ## ⚙️ Available Providers
29
+
30
+ Currently, the following providers are implemented with OpenAI-compatible interfaces:
31
+
32
+ - DeepInfra
33
+ - Glider
34
+ - ChatGPTClone
35
+ - X0GPT
36
+ - WiseCat
37
+ - Venice
38
+ - ExaAI
39
+ - TypeGPT
40
+ - SciraChat
41
+ - LLMChatCo
42
+ - YEPCHAT
43
+ - HeckAI
44
+ - SonusAI
45
+ - ExaChat
46
+ - Netwrck
47
+ - StandardInput
48
+ - Writecream
49
+ - toolbaz
50
+ - UncovrAI
51
+ - OPKFC
52
+ - TextPollinations
53
+ - E2B
54
+ - MultiChatAI
55
+ - AI4Chat
56
+ - MCPCore
57
+ - TypefullyAI
58
+ - Flowith
59
+ - ChatSandbox
60
+ - Cloudflare
61
+ - NEMOTRON
62
+ - BLACKBOXAI
63
+ - Copilot
64
+ - TwoAI
65
+ - oivscode
66
+ - Qwen3
67
+ - TogetherAI
68
+ - PiAI
69
+ - FalconH1
70
+ - XenAI
71
+ - GeminiProxy
72
+ - MonoChat
73
+ - Friendli
74
+ - MiniMax
75
+ - QodoAI
76
+ - Kimi
77
+ - GptOss
78
+ ## 💻 Usage Examples
79
+
80
+ Here are examples of how to use the OpenAI-compatible providers in your code.
81
+
82
+ ### Basic Usage with DeepInfra
83
+
84
+ ```python
85
+ from webscout.client import DeepInfra
86
+
87
+ # Initialize the client
88
+ client = DeepInfra()
89
+
90
+ # Create a completion (non-streaming)
91
+ response = client.chat.completions.create(
92
+ model="meta-llama/Meta-Llama-3.1-8B-Instruct",
93
+ messages=[
94
+ {"role": "system", "content": "You are a helpful assistant."},
95
+ {"role": "user", "content": "Tell me about Python programming."}
96
+ ],
97
+ temperature=0.7,
98
+ max_tokens=500
99
+ )
100
+
101
+ # Print the response
102
+ print(response.choices[0].message.content)
103
+ ```
104
+
105
+ ### Basic Usage with Glider
106
+
107
+ ```python
108
+ from webscout.client import Glider
109
+
110
+ # Initialize the client
111
+ client = Glider()
112
+
113
+ # Create a completion (non-streaming)
114
+ response = client.chat.completions.create(
115
+ model="chat-llama-3-1-70b",
116
+ messages=[
117
+ {"role": "system", "content": "You are a helpful assistant."},
118
+ {"role": "user", "content": "Tell me about Python programming."}
119
+ ],
120
+ max_tokens=500
121
+ )
122
+
123
+ # Print the response
124
+ print(response.choices[0].message.content)
125
+ ```
126
+
127
+ ### Streaming Responses (Example with DeepInfra)
128
+
129
+ ```python
130
+ from webscout.client import DeepInfra
131
+
132
+ # Initialize the client
133
+ client = DeepInfra()
134
+
135
+ # Create a streaming completion
136
+ stream = client.chat.completions.create(
137
+ model="meta-llama/Meta-Llama-3.1-8B-Instruct",
138
+ messages=[
139
+ {"role": "system", "content": "You are a helpful assistant."},
140
+ {"role": "user", "content": "Write a short poem about programming."}
141
+ ],
142
+ stream=True,
143
+ temperature=0.7
144
+ )
145
+
146
+ # Process the streaming response
147
+ for chunk in stream:
148
+ if chunk.choices[0].delta.content:
149
+ print(chunk.choices[0].delta.content, end="", flush=True)
150
+ print() # Add a newline at the end
151
+ ```
152
+
153
+ ### Streaming with Glider
154
+
155
+ ```python
156
+ from webscout.client import Glider
157
+
158
+ # Initialize the client
159
+ client = Glider()
160
+
161
+ # Create a streaming completion
162
+ stream = client.chat.completions.create(
163
+ model="chat-llama-3-1-70b",
164
+ messages=[
165
+ {"role": "system", "content": "You are a helpful assistant."},
166
+ {"role": "user", "content": "Write a short poem about programming."}
167
+ ],
168
+ stream=True
169
+ )
170
+
171
+ # Process the streaming response
172
+ for chunk in stream:
173
+ if chunk.choices[0].delta.content:
174
+ print(chunk.choices[0].delta.content, end="", flush=True)
175
+ print() # Add a newline at the end
176
+ ```
177
+
178
+ ### Basic Usage with ChatGPTClone
179
+
180
+ ```python
181
+ from webscout.client import ChatGPTClone
182
+
183
+ # Initialize the client
184
+ client = ChatGPTClone()
185
+
186
+ # Create a completion (non-streaming)
187
+ response = client.chat.completions.create(
188
+ model="gpt-4",
189
+ messages=[
190
+ {"role": "system", "content": "You are a helpful assistant."},
191
+ {"role": "user", "content": "Tell me about Python programming."}
192
+ ],
193
+ temperature=0.7
194
+ )
195
+
196
+ # Print the response
197
+ print(response.choices[0].message.content)
198
+ ```
199
+
200
+ ### Streaming with ChatGPTClone
201
+
202
+ ```python
203
+ from webscout.client import ChatGPTClone
204
+
205
+ # Initialize the client
206
+ client = ChatGPTClone()
207
+
208
+ # Create a streaming completion
209
+ stream = client.chat.completions.create(
210
+ model="gpt-4",
211
+ messages=[
212
+ {"role": "system", "content": "You are a helpful assistant."},
213
+ {"role": "user", "content": "Write a short poem about programming."}
214
+ ],
215
+ stream=True
216
+ )
217
+
218
+ # Process the streaming response
219
+ for chunk in stream:
220
+ if chunk.choices[0].delta.content:
221
+ print(chunk.choices[0].delta.content, end="", flush=True)
222
+ print() # Add a newline at the end
223
+ ```
224
+
225
+ ### Basic Usage with X0GPT
226
+
227
+ ```python
228
+ from webscout.client import X0GPT
229
+
230
+ # Initialize the client
231
+ client = X0GPT()
232
+
233
+ # Create a completion (non-streaming)
234
+ response = client.chat.completions.create(
235
+ model="gpt-4", # Model name doesn't matter for X0GPT
236
+ messages=[
237
+ {"role": "system", "content": "You are a helpful assistant."},
238
+ {"role": "user", "content": "Tell me about Python programming."}
239
+ ]
240
+ )
241
+
242
+ # Print the response
243
+ print(response.choices[0].message.content)
244
+ ```
245
+
246
+ ### Streaming with X0GPT
247
+
248
+ ```python
249
+ from webscout.client import X0GPT
250
+
251
+ # Initialize the client
252
+ client = X0GPT()
253
+
254
+ # Create a streaming completion
255
+ stream = client.chat.completions.create(
256
+ model="gpt-4", # Model name doesn't matter for X0GPT
257
+ messages=[
258
+ {"role": "system", "content": "You are a helpful assistant."},
259
+ {"role": "user", "content": "Write a short poem about programming."}
260
+ ],
261
+ stream=True
262
+ )
263
+
264
+ # Process the streaming response
265
+ for chunk in stream:
266
+ if chunk.choices[0].delta.content:
267
+ print(chunk.choices[0].delta.content, end="", flush=True)
268
+ print() # Add a newline at the end
269
+ ```
270
+
271
+ ### Basic Usage with WiseCat
272
+
273
+ ```python
274
+ from webscout.client import WiseCat
275
+
276
+ # Initialize the client
277
+ client = WiseCat()
278
+
279
+ # Create a completion (non-streaming)
280
+ response = client.chat.completions.create(
281
+ model="chat-model-small",
282
+ messages=[
283
+ {"role": "system", "content": "You are a helpful assistant."},
284
+ {"role": "user", "content": "Tell me about Python programming."}
285
+ ]
286
+ )
287
+
288
+ # Print the response
289
+ print(response.choices[0].message.content)
290
+ ```
291
+
292
+ ### Streaming with WiseCat
293
+
294
+ ```python
295
+ from webscout.client import WiseCat
296
+
297
+ # Initialize the client
298
+ client = WiseCat()
299
+
300
+ # Create a streaming completion
301
+ stream = client.chat.completions.create(
302
+ model="chat-model-small",
303
+ messages=[
304
+ {"role": "system", "content": "You are a helpful assistant."},
305
+ {"role": "user", "content": "Write a short poem about programming."}
306
+ ],
307
+ stream=True
308
+ )
309
+
310
+ # Process the streaming response
311
+ for chunk in stream:
312
+ if chunk.choices[0].delta.content:
313
+ print(chunk.choices[0].delta.content, end="", flush=True)
314
+ print() # Add a newline at the end
315
+ ```
316
+
317
+ ### Basic Usage with Venice
318
+
319
+ ```python
320
+ from webscout.client import Venice
321
+
322
+ # Initialize the client
323
+ client = Venice(temperature=0.7, top_p=0.9)
324
+
325
+ # Create a completion (non-streaming)
326
+ response = client.chat.completions.create(
327
+ model="mistral-31-24b",
328
+ messages=[
329
+ {"role": "system", "content": "You are a helpful assistant."},
330
+ {"role": "user", "content": "Tell me about Python programming."}
331
+ ]
332
+ )
333
+
334
+ # Print the response
335
+ print(response.choices[0].message.content)
336
+ ```
337
+
338
+ ### Streaming with Venice
339
+
340
+ ```python
341
+ from webscout.client import Venice
342
+
343
+ # Initialize the client
344
+ client = Venice()
345
+
346
+ # Create a streaming completion
347
+ stream = client.chat.completions.create(
348
+ model="mistral-31-24b",
349
+ messages=[
350
+ {"role": "system", "content": "You are a helpful assistant."},
351
+ {"role": "user", "content": "Write a short poem about programming."}
352
+ ],
353
+ stream=True
354
+ )
355
+
356
+ # Process the streaming response
357
+ for chunk in stream:
358
+ if chunk.choices[0].delta.content:
359
+ print(chunk.choices[0].delta.content, end="", flush=True)
360
+ print() # Add a newline at the end
361
+ ```
362
+
363
+ ### Basic Usage with ExaAI
364
+
365
+ ```python
366
+ from webscout.client import ExaAI
367
+
368
+ # Initialize the client
369
+ client = ExaAI()
370
+
371
+ # Create a completion (non-streaming)
372
+ response = client.chat.completions.create(
373
+ model="O3-Mini",
374
+ messages=[
375
+ # Note: ExaAI does not support system messages (they will be removed)
376
+ {"role": "user", "content": "Hello!"},
377
+ {"role": "assistant", "content": "Hi there! How can I help you today?"},
378
+ {"role": "user", "content": "Tell me about Python programming."}
379
+ ]
380
+ )
381
+
382
+ # Print the response
383
+ print(response.choices[0].message.content)
384
+ ```
385
+
386
+ ### Basic Usage with HeckAI
387
+
388
+ ```python
389
+ from webscout.client import HeckAI
390
+
391
+ # Initialize the client
392
+ client = HeckAI(language="English")
393
+
394
+ # Create a completion (non-streaming)
395
+ response = client.chat.completions.create(
396
+ model="google/gemini-2.0-flash-001",
397
+ messages=[
398
+ {"role": "system", "content": "You are a helpful assistant."},
399
+ {"role": "user", "content": "Tell me about Python programming."}
400
+ ]
401
+ )
402
+
403
+ # Print the response
404
+ print(response.choices[0].message.content)
405
+ ```
406
+
407
+ ### Streaming with HeckAI
408
+
409
+ ```python
410
+ from webscout.client import HeckAI
411
+
412
+ # Initialize the client
413
+ client = HeckAI()
414
+
415
+ # Create a streaming completion
416
+ stream = client.chat.completions.create(
417
+ model="google/gemini-2.0-flash-001",
418
+ messages=[
419
+ {"role": "system", "content": "You are a helpful assistant."},
420
+ {"role": "user", "content": "Write a short poem about programming."}
421
+ ],
422
+ stream=True
423
+ )
424
+
425
+ # Process the streaming response
426
+ for chunk in stream:
427
+ if chunk.choices[0].delta.content:
428
+ print(chunk.choices[0].delta.content, end="", flush=True)
429
+ print() # Add a newline at the end
430
+ ```
431
+
432
+ ### Streaming with ExaAI
433
+
434
+ ```python
435
+ from webscout.client import ExaAI
436
+
437
+ # Initialize the client
438
+ client = ExaAI()
439
+
440
+ # Create a streaming completion
441
+ stream = client.chat.completions.create(
442
+ model="O3-Mini",
443
+ messages=[
444
+ # Note: ExaAI does not support system messages (they will be removed)
445
+ {"role": "user", "content": "Hello!"},
446
+ {"role": "assistant", "content": "Hi there! How can I help you today?"},
447
+ {"role": "user", "content": "Write a short poem about programming."}
448
+ ],
449
+ stream=True
450
+ )
451
+
452
+ # Process the streaming response
453
+ for chunk in stream:
454
+ if chunk.choices[0].delta.content:
455
+ print(chunk.choices[0].delta.content, end="", flush=True)
456
+ print() # Add a newline at the end
457
+ ```
458
+
459
+ ### Basic Usage with TypeGPT
460
+
461
+ ```python
462
+ from webscout.client import TypeGPT
463
+
464
+ # Initialize the client
465
+ client = TypeGPT()
466
+
467
+ # Create a completion (non-streaming)
468
+ response = client.chat.completions.create(
469
+ model="chatgpt-4o-latest",
470
+ messages=[
471
+ {"role": "system", "content": "You are a helpful assistant."},
472
+ {"role": "user", "content": "Write a short poem about programming."}
473
+ ]
474
+ )
475
+
476
+ # Print the response
477
+ print(response.choices[0].message.content)
478
+ ```
479
+
480
+ ### Streaming with TypeGPT
481
+
482
+ ```python
483
+ from webscout.client import TypeGPT
484
+
485
+ # Initialize the client
486
+ client = TypeGPT()
487
+
488
+ # Create a streaming completion
489
+ stream = client.chat.completions.create(
490
+ model="chatgpt-4o-latest",
491
+ messages=[
492
+ {"role": "system", "content": "You are a helpful assistant."},
493
+ {"role": "user", "content": "Write a short poem about programming."}
494
+ ],
495
+ stream=True
496
+ )
497
+
498
+ # Process the streaming response
499
+ for chunk in stream:
500
+ if chunk.choices[0].delta.content:
501
+ print(chunk.choices[0].delta.content, end="", flush=True)
502
+ print() # Add a newline at the end
503
+ ```
+
+ ### Basic Usage with SciraChat
+
+ ```python
+ from webscout.client import SciraChat
+
+ # Initialize the client
+ client = SciraChat()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="scira-default",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ]
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with SciraChat
+
+ ```python
+ from webscout.client import SciraChat
+
+ # Initialize the client
+ client = SciraChat()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="scira-default",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print()  # Add a newline at the end
+ ```
+
+ ### Basic Usage with FreeAIChat
+
+ ```python
+ from webscout.client import FreeAIChat
+
+ # Initialize the client
+ client = FreeAIChat()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="GPT 4o",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ]
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with FreeAIChat
+
+ ```python
+ from webscout.client import FreeAIChat
+
+ # Initialize the client
+ client = FreeAIChat()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="GPT 4o",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print()  # Add a newline at the end
+ ```
+
+ ### Basic Usage with LLMChatCo
+
+ ```python
+ from webscout.client import LLMChatCo
+
+ # Initialize the client
+ client = LLMChatCo()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="gemini-flash-2.0",  # Default model
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ],
+     temperature=0.7
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with LLMChatCo
+
+ ```python
+ from webscout.client import LLMChatCo
+
+ # Initialize the client
+ client = LLMChatCo()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="gemini-flash-2.0",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print()  # Add a newline at the end
+ ```
+
+ ### Basic Usage with YEPCHAT
+
+ ```python
+ from webscout.client import YEPCHAT
+
+ # Initialize the client
+ client = YEPCHAT()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="DeepSeek-R1-Distill-Qwen-32B",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ],
+     temperature=0.7
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with YEPCHAT
+
+ ```python
+ from webscout.client import YEPCHAT
+
+ # Initialize the client
+ client = YEPCHAT()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="Mixtral-8x7B-Instruct-v0.1",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print()  # Add a newline at the end
+ ```
+
+ ### Basic Usage with SonusAI
+
+ ```python
+ from webscout.client import SonusAI
+
+ # Initialize the client
+ client = SonusAI()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="pro",  # Choose from 'pro', 'air', or 'mini'
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ],
+     reasoning=True  # Optional: enable reasoning mode
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with SonusAI
+
+ ```python
+ from webscout.client import SonusAI
+
+ # Initialize the client
+ client = SonusAI(timeout=60)
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="air",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print()  # Add a newline at the end
+ ```
+
+ ### Basic Usage with ExaChat
+
+ ```python
+ from webscout.client import ExaChat
+
+ # Initialize the client
+ client = ExaChat()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="exaanswer",  # Choose from the many available models
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ]
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Using Different ExaChat Providers
+
+ ```python
+ from webscout.client import ExaChat
+
+ # Initialize the client
+ client = ExaChat(timeout=60)
+
+ # Use a Gemini model
+ gemini_response = client.chat.completions.create(
+     model="gemini-2.0-flash",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Explain quantum computing in simple terms."}
+     ]
+ )
+
+ # Use a Groq model
+ groq_response = client.chat.completions.create(
+     model="llama-3.1-8b-instant",
+     messages=[
+         {"role": "user", "content": "Tell me about Python programming."}
+     ]
+ )
+
+ # Print both responses
+ print(gemini_response.choices[0].message.content)
+ print(groq_response.choices[0].message.content)
+ ```
+
+ ### Streaming with Netwrck
+
+ ```python
+ from webscout.client import Netwrck
+
+ # Initialize the client
+ client = Netwrck(timeout=60)
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="openai/gpt-4o-mini",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print()  # Add a newline at the end
+ ```
+
+ ### Basic Usage with StandardInput
+
+ ```python
+ from webscout.client import StandardInput
+
+ # Initialize the client
+ client = StandardInput()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="standard-quick",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ]
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with StandardInput
+
+ ```python
+ from webscout.client import StandardInput
+
+ # Initialize the client
+ client = StandardInput()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="standard-reasoning",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Count from 1 to 5."}
+     ],
+     stream=True,
+     enable_reasoning=True  # Enable reasoning capabilities
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print()  # Add a newline at the end
+ ```
+
+ ## 🔄 Response Format
+
+ All providers return responses that mimic the OpenAI API structure, so tools written against the OpenAI client can consume them unchanged; a provider-agnostic sketch follows the chunk example below.
+
+ ### 📝 Non-streaming Response
+
+ ```json
+ {
+   "id": "chatcmpl-123abc",
+   "object": "chat.completion",
+   "created": 1677858242,
+   "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
+   "usage": {
+     "prompt_tokens": 13,
+     "completion_tokens": 7,
+     "total_tokens": 20
+   },
+   "choices": [
+     {
+       "message": {
+         "role": "assistant",
+         "content": "This is a response from the model."
+       },
+       "finish_reason": "stop",
+       "index": 0
+     }
+   ]
+ }
+ ```
+
+ ### 📱 Streaming Response Chunks
+
+ ```json
+ {
+   "id": "chatcmpl-123abc",
+   "object": "chat.completion.chunk",
+   "created": 1677858242,
+   "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
+   "choices": [
+     {
+       "delta": {
+         "content": "This "
+       },
+       "finish_reason": null,
+       "index": 0
+     }
+   ]
+ }
+ ```
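+
+ Because every provider emits these same shapes, consuming code can be written once and pointed at any client in this package. A minimal sketch (the `ask` helper is illustrative, not part of webscout):
+
+ ```python
+ from webscout.client import HeckAI, TypeGPT
+
+ def ask(client, model, prompt):
+     """Run one non-streaming completion against any OpenAI-compatible client."""
+     response = client.chat.completions.create(
+         model=model,
+         messages=[{"role": "user", "content": prompt}],
+     )
+     return response.choices[0].message.content
+
+ # The same helper works unchanged across providers:
+ print(ask(HeckAI(), "google/gemini-2.0-flash-001", "Say hello."))
+ print(ask(TypeGPT(), "chatgpt-4o-latest", "Say hello."))
+ ```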
+
+ ## 🧩 Architecture
+
+ The OpenAI-compatible providers are built on a modular architecture:
+
+ - `base.py`: Abstract base classes that define the OpenAI-compatible interface
+ - `utils.py`: Data structures that mimic OpenAI's response format
+ - Provider-specific implementations (e.g., `deepinfra.py`): Concrete classes that implement the abstract interfaces for individual providers
+
+ This architecture makes it easy to add new providers while maintaining a consistent interface.
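+
+ To make the layering concrete, here is a small self-contained toy that mirrors the interface shape these providers expose (`client.chat.completions.create(...)` returning an OpenAI-style object). It is illustrative only: it does not use the real `base.py`/`utils.py` classes, and a real provider would call its upstream HTTP API instead of echoing the prompt:
+
+ ```python
+ # Toy sketch of the interface shape only; the real abstract base classes
+ # live in base.py and the response structures in utils.py.
+ import time
+ import uuid
+ from types import SimpleNamespace
+
+ class _Completions:
+     def create(self, *, model, messages, stream=False, **kwargs):
+         # A real provider would send `messages` to its backend here.
+         text = f"(echo from {model}) {messages[-1]['content']}"
+         return SimpleNamespace(
+             id=f"chatcmpl-{uuid.uuid4().hex[:12]}",
+             object="chat.completion",
+             created=int(time.time()),
+             model=model,
+             choices=[SimpleNamespace(
+                 index=0,
+                 finish_reason="stop",
+                 message=SimpleNamespace(role="assistant", content=text),
+             )],
+         )
+
+ class ToyProvider:
+     """Mimics the namespace layout used by the real providers:
+     client.chat.completions.create(...)."""
+     def __init__(self):
+         self.chat = SimpleNamespace(completions=_Completions())
+
+ client = ToyProvider()
+ reply = client.chat.completions.create(
+     model="toy-1",
+     messages=[{"role": "user", "content": "Hi"}],
+ )
+ print(reply.choices[0].message.content)
+ ```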
+
+ ## 📝 Notes
+
+ - Some providers may require API keys for full functionality
+ - Not every OpenAI feature is supported by every provider
+ - Response formats are standardized to match OpenAI's, but the quality of the content itself depends on the underlying provider and model
+
+ ## 🤝 Contributing
+
+ Want to add a new OpenAI-compatible provider? Follow these steps:
+
+ 1. Create a new file in the `webscout/Provider/OPENAI` directory
+ 2. Implement the `OpenAICompatibleProvider` interface from `base.py` (the toy sketch in the Architecture section shows the expected interface shape)
+ 3. Add appropriate tests
+ 4. Update this README with information about the new provider
+
+ ## 📚 Related Documentation
+
+ - [OpenAI API Reference](https://platform.openai.com/docs/api-reference)
+ - [DeepInfra Documentation](https://deepinfra.com/docs)
+ - [Glider.so Website](https://glider.so/)
+ - [ChatGPT Clone Website](https://chatgpt-clone-ten-nu.vercel.app/)
+ - [X0GPT Website](https://x0-gpt.devwtf.in/)
+ - [WiseCat Website](https://wise-cat-groq.vercel.app/)
+ - [Venice AI Website](https://venice.ai/)
+ - [ExaAI Website](https://o3minichat.exa.ai/)
+ - [TypeGPT Website](https://chat.typegpt.net/)
+ - [SciraChat Website](https://scira.ai/)
+ - [FreeAIChat Website](https://freeaichatplayground.com/)
+ - [LLMChatCo Website](https://llmchat.co/)
+ - [Yep.com Website](https://yep.com/)
+ - [HeckAI Website](https://heck.ai/)
+ - [SonusAI Website](https://chat.sonus.ai/)
+ - [ExaChat Website](https://exa-chat.vercel.app/)
+ - [Netwrck Website](https://netwrck.com/)
+ - [StandardInput Website](https://chat.standard-input.com/)
+
+ <div align="center">
+   <a href="https://t.me/PyscoutAI"><img alt="Telegram Group" src="https://img.shields.io/badge/Telegram%20Group-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white"></a>
+   <a href="https://buymeacoffee.com/oevortex"><img alt="Buy Me A Coffee" src="https://img.shields.io/badge/Buy%20Me%20A%20Coffee-FFDD00?style=for-the-badge&logo=buymeacoffee&logoColor=black"></a>
+ </div>