webscout 8.2.8__py3-none-any.whl → 8.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of webscout might be problematic.

Files changed (197)
  1. webscout/AIauto.py +34 -16
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +155 -35
  22. webscout/Provider/ChatSandbox.py +2 -1
  23. webscout/Provider/Deepinfra.py +339 -339
  24. webscout/Provider/ExaChat.py +358 -358
  25. webscout/Provider/Gemini.py +169 -169
  26. webscout/Provider/GithubChat.py +1 -2
  27. webscout/Provider/Glider.py +3 -3
  28. webscout/Provider/HeckAI.py +172 -82
  29. webscout/Provider/LambdaChat.py +1 -0
  30. webscout/Provider/MCPCore.py +7 -3
  31. webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
  32. webscout/Provider/OPENAI/Cloudflare.py +38 -21
  33. webscout/Provider/OPENAI/FalconH1.py +457 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +35 -18
  35. webscout/Provider/OPENAI/NEMOTRON.py +34 -34
  36. webscout/Provider/OPENAI/PI.py +427 -0
  37. webscout/Provider/OPENAI/Qwen3.py +304 -0
  38. webscout/Provider/OPENAI/README.md +952 -1253
  39. webscout/Provider/OPENAI/TwoAI.py +374 -0
  40. webscout/Provider/OPENAI/__init__.py +7 -1
  41. webscout/Provider/OPENAI/ai4chat.py +73 -63
  42. webscout/Provider/OPENAI/api.py +869 -644
  43. webscout/Provider/OPENAI/base.py +2 -0
  44. webscout/Provider/OPENAI/c4ai.py +34 -13
  45. webscout/Provider/OPENAI/chatgpt.py +575 -556
  46. webscout/Provider/OPENAI/chatgptclone.py +512 -487
  47. webscout/Provider/OPENAI/chatsandbox.py +11 -6
  48. webscout/Provider/OPENAI/copilot.py +258 -0
  49. webscout/Provider/OPENAI/deepinfra.py +327 -318
  50. webscout/Provider/OPENAI/e2b.py +140 -104
  51. webscout/Provider/OPENAI/exaai.py +420 -411
  52. webscout/Provider/OPENAI/exachat.py +448 -443
  53. webscout/Provider/OPENAI/flowith.py +7 -3
  54. webscout/Provider/OPENAI/freeaichat.py +12 -8
  55. webscout/Provider/OPENAI/glider.py +15 -8
  56. webscout/Provider/OPENAI/groq.py +5 -2
  57. webscout/Provider/OPENAI/heckai.py +311 -307
  58. webscout/Provider/OPENAI/llmchatco.py +9 -7
  59. webscout/Provider/OPENAI/mcpcore.py +18 -9
  60. webscout/Provider/OPENAI/multichat.py +7 -5
  61. webscout/Provider/OPENAI/netwrck.py +16 -11
  62. webscout/Provider/OPENAI/oivscode.py +290 -0
  63. webscout/Provider/OPENAI/opkfc.py +507 -496
  64. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  65. webscout/Provider/OPENAI/scirachat.py +29 -17
  66. webscout/Provider/OPENAI/sonus.py +308 -303
  67. webscout/Provider/OPENAI/standardinput.py +442 -433
  68. webscout/Provider/OPENAI/textpollinations.py +18 -11
  69. webscout/Provider/OPENAI/toolbaz.py +419 -413
  70. webscout/Provider/OPENAI/typefully.py +17 -10
  71. webscout/Provider/OPENAI/typegpt.py +21 -11
  72. webscout/Provider/OPENAI/uncovrAI.py +477 -462
  73. webscout/Provider/OPENAI/utils.py +90 -79
  74. webscout/Provider/OPENAI/venice.py +435 -425
  75. webscout/Provider/OPENAI/wisecat.py +387 -381
  76. webscout/Provider/OPENAI/writecream.py +166 -163
  77. webscout/Provider/OPENAI/x0gpt.py +26 -37
  78. webscout/Provider/OPENAI/yep.py +384 -356
  79. webscout/Provider/PI.py +2 -1
  80. webscout/Provider/TTI/README.md +55 -101
  81. webscout/Provider/TTI/__init__.py +4 -9
  82. webscout/Provider/TTI/aiarta.py +365 -0
  83. webscout/Provider/TTI/artbit.py +0 -0
  84. webscout/Provider/TTI/base.py +64 -0
  85. webscout/Provider/TTI/fastflux.py +200 -0
  86. webscout/Provider/TTI/magicstudio.py +201 -0
  87. webscout/Provider/TTI/piclumen.py +203 -0
  88. webscout/Provider/TTI/pixelmuse.py +225 -0
  89. webscout/Provider/TTI/pollinations.py +221 -0
  90. webscout/Provider/TTI/utils.py +11 -0
  91. webscout/Provider/TTS/__init__.py +2 -1
  92. webscout/Provider/TTS/base.py +159 -159
  93. webscout/Provider/TTS/openai_fm.py +129 -0
  94. webscout/Provider/TextPollinationsAI.py +308 -308
  95. webscout/Provider/TwoAI.py +239 -44
  96. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  97. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  98. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  99. webscout/Provider/Writecream.py +246 -246
  100. webscout/Provider/__init__.py +2 -2
  101. webscout/Provider/ai4chat.py +33 -8
  102. webscout/Provider/granite.py +41 -6
  103. webscout/Provider/koala.py +169 -169
  104. webscout/Provider/oivscode.py +309 -0
  105. webscout/Provider/samurai.py +3 -2
  106. webscout/Provider/scnet.py +1 -0
  107. webscout/Provider/typegpt.py +3 -3
  108. webscout/Provider/uncovr.py +368 -368
  109. webscout/client.py +70 -0
  110. webscout/litprinter/__init__.py +58 -58
  111. webscout/optimizers.py +419 -419
  112. webscout/scout/README.md +3 -1
  113. webscout/scout/core/crawler.py +134 -64
  114. webscout/scout/core/scout.py +148 -109
  115. webscout/scout/element.py +106 -88
  116. webscout/swiftcli/Readme.md +323 -323
  117. webscout/swiftcli/plugins/manager.py +9 -2
  118. webscout/version.py +1 -1
  119. webscout/zeroart/__init__.py +134 -134
  120. webscout/zeroart/effects.py +100 -100
  121. webscout/zeroart/fonts.py +1238 -1238
  122. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
  123. webscout-8.3.dist-info/RECORD +290 -0
  124. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  125. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
  126. webscout/Litlogger/Readme.md +0 -175
  127. webscout/Litlogger/core/__init__.py +0 -6
  128. webscout/Litlogger/core/level.py +0 -23
  129. webscout/Litlogger/core/logger.py +0 -165
  130. webscout/Litlogger/handlers/__init__.py +0 -12
  131. webscout/Litlogger/handlers/console.py +0 -33
  132. webscout/Litlogger/handlers/file.py +0 -143
  133. webscout/Litlogger/handlers/network.py +0 -173
  134. webscout/Litlogger/styles/__init__.py +0 -7
  135. webscout/Litlogger/styles/colors.py +0 -249
  136. webscout/Litlogger/styles/formats.py +0 -458
  137. webscout/Litlogger/styles/text.py +0 -87
  138. webscout/Litlogger/utils/__init__.py +0 -6
  139. webscout/Litlogger/utils/detectors.py +0 -153
  140. webscout/Litlogger/utils/formatters.py +0 -200
  141. webscout/Provider/ChatGPTGratis.py +0 -194
  142. webscout/Provider/TTI/AiForce/README.md +0 -159
  143. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  144. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  145. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  146. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  147. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  148. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  149. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  150. webscout/Provider/TTI/ImgSys/README.md +0 -174
  151. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  152. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  153. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  154. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  155. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  156. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  157. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  158. webscout/Provider/TTI/Nexra/README.md +0 -155
  159. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  160. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  161. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  162. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  163. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  164. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  165. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  166. webscout/Provider/TTI/aiarta/README.md +0 -134
  167. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  168. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  169. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  170. webscout/Provider/TTI/artbit/README.md +0 -100
  171. webscout/Provider/TTI/artbit/__init__.py +0 -22
  172. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  173. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  174. webscout/Provider/TTI/fastflux/README.md +0 -129
  175. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  176. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  177. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  178. webscout/Provider/TTI/huggingface/README.md +0 -114
  179. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  180. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  181. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  182. webscout/Provider/TTI/piclumen/README.md +0 -161
  183. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  184. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  185. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  186. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  187. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  188. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  189. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  190. webscout/Provider/TTI/talkai/README.md +0 -139
  191. webscout/Provider/TTI/talkai/__init__.py +0 -4
  192. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  193. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  194. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  195. webscout-8.2.8.dist-info/RECORD +0 -334
  196. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  197. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/README.md
@@ -1,1253 +1,952 @@
- <div align="center">
-   <a href="https://github.com/OEvortex/Webscout">
-     <img src="https://img.shields.io/badge/WebScout-OpenAI%20Compatible%20Providers-4285F4?style=for-the-badge&logo=openai&logoColor=white" alt="WebScout OpenAI Compatible Providers">
-   </a>
-   <br/>
-   <h1>WebScout OpenAI-Compatible Providers</h1>
-   <p><strong>Seamlessly integrate with various AI providers using OpenAI-compatible interfaces</strong></p>
-
-   <p>
-     <img src="https://img.shields.io/badge/Python-3.7+-3776AB?style=flat-square&logo=python&logoColor=white" alt="Python 3.7+">
-     <img src="https://img.shields.io/badge/License-MIT-green?style=flat-square" alt="License: MIT">
-     <img src="https://img.shields.io/badge/PRs-Welcome-brightgreen?style=flat-square" alt="PRs Welcome">
-   </p>
-
-   <p>
-     Access multiple AI providers through a standardized OpenAI-compatible interface, making it easy to switch between providers without changing your code.
-   </p>
- </div>
-
- ## 🚀 Overview
-
- The WebScout OpenAI-Compatible Providers module offers a standardized way to interact with various AI providers using the familiar OpenAI API structure. This makes it easy to:
-
- * Use the same code structure across different AI providers
- * Switch between providers without major code changes (see the sketch below)
- * Leverage the OpenAI ecosystem of tools and libraries with alternative AI providers
-
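- For instance, the call site can stay identical while you swap the client class. A minimal sketch (both providers and their model names come from the sections below):
-
- ```python
- from webscout.Provider.OPENAI import DeepInfra, Glider
-
- # The OpenAI-style call shape is constant; only the client class changes.
- for client, model in [(DeepInfra(), "meta-llama/Llama-3.3-70B-Instruct"),
-                       (Glider(), "chat-llama-3-1-8b")]:
-     response = client.chat.completions.create(
-         model=model,
-         messages=[{"role": "user", "content": "Say hello in one sentence."}],
-     )
-     print(response.choices[0].message.content)
- ```
-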
- ## ⚙️ Available Providers
-
- Currently, the following providers are implemented with OpenAI-compatible interfaces:
-
- DeepInfra
- Glider
- ChatGPTClone
- X0GPT
- WiseCat
- Venice
- ExaAI
- TypeGPT
- SciraChat
- LLMChatCo
- FreeAIChat
- YEPCHAT
- HeckAI
- SonusAI
- ExaChat
- Netwrck
- StandardInput
- Writecream
- toolbaz
- UncovrAI
- OPKFC
- TextPollinations
- E2B
- MultiChatAI
- AI4Chat
- MCPCore
- TypefullyAI
- Flowith
- ChatSandbox
- Cloudflare
- NEMOTRON
- BLACKBOXAI
-
- ---
-
- ### DeepInfra
-
- Access DeepInfra's powerful models through an OpenAI-compatible interface.
-
- **Available Models:**
-
- * `deepseek-ai/DeepSeek-V3`
- * `google/gemma-2-27b-it`
- * `meta-llama/Llama-4-Maverick-17B`
- * `meta-llama/Llama-3.3-70B-Instruct`
- * `microsoft/phi-4`
- * `mistralai/Mistral-Small-24B`
- * `Qwen/QwQ-32B`
-
- [View all models →](https://deepinfra.com/models)
-
- ---
-
- ### Glider
-
- Access Glider.so's models through an OpenAI-compatible interface.
-
- **Available Models:**
-
- * `chat-llama-3-1-70b`
- * `chat-llama-3-1-8b`
- * `chat-llama-3-2-3b`
- * `deepseek-ai/DeepSeek-R1`
-
- ---
-
- ### ChatGPTClone
-
- Access ChatGPT Clone API through an OpenAI-compatible interface.
-
- **Available Models:**
-
- * `gpt-4`
- * `gpt-3.5-turbo`
-
- ---
-
- ### X0GPT
-
- Access X0GPT API through an OpenAI-compatible interface.
-
- **Available Models:**
-
- * `gpt-4`
- * `gpt-3.5-turbo`
-
- ---
-
- ### WiseCat
-
- Access WiseCat API through an OpenAI-compatible interface.
-
- **Available Models:**
-
- * `chat-model-small`
- * `chat-model-large`
- * `chat-model-reasoning`
-
- ---
-
- ### Venice
-
- Access Venice AI API through an OpenAI-compatible interface.
-
- **Available Models:**
-
- * `mistral-31-24b`
- * `llama-3.2-3b-akash`
- * `qwen2dot5-coder-32b`
- * `deepseek-coder-v2-lite`
-
- ---
-
- ### ExaAI
-
- Access ExaAI's O3-Mini model through an OpenAI-compatible interface.
-
- **Available Models:**
-
- * `O3-Mini`: ExaAI's O3-Mini model
-
- > **Important Note:** ExaAI does not support system messages. Any system messages will be automatically removed from the conversation.
-
- ---
-
- ### TypeGPT
-
- Access TypeGPT.net's models through an OpenAI-compatible interface.
-
- **Available Models:**
-
- * `gpt-4o-mini-2024-07-18`: OpenAI's GPT-4o mini model
- * `chatgpt-4o-latest`: Latest version of ChatGPT with GPT-4o
- * `deepseek-r1`: DeepSeek's R1 model
- * `deepseek-v3`: DeepSeek's V3 model
- * `uncensored-r1`: Uncensored version of DeepSeek R1
- * `Image-Generator`: For generating images
-
- ---
-
- ### SciraChat
-
- Access Scira.ai's models through an OpenAI-compatible interface.
-
- **Available Models:**
-
- * `scira-default`: Grok3 model
- * `scira-grok-3-mini`: Grok3-mini (thinking model)
- * `scira-vision`: Grok2-Vision (vision model)
- * `scira-claude`: Sonnet-3.7 model
- * `scira-optimus`: Optimus model
-
- ---
-
- ### LLMChatCo
-
- Access LLMChat.co's models through an OpenAI-compatible interface.
-
- **Available Models:**
-
- * `gemini-flash-2.0`: Google's Gemini Flash 2.0 model (default)
- * `llama-4-scout`: Meta's Llama 4 Scout model
- * `gpt-4o-mini`: OpenAI's GPT-4o mini model
-
- ---
-
- ### FreeAIChat
-
- Access FreeAIChat's wide range of models through an OpenAI-compatible interface.
-
- **Available Models:**
-
- **OpenAI Models**
- * `GPT 4o`
- * `GPT 4.5 Preview`
- * `GPT 4o Latest`
- * `O1`
- * `O3 Mini`
-
- **Anthropic Models**
- * `Claude 3.5 haiku`
- * `Claude 3.5 sonnet`
- * `Claude 3.7 Sonnet`
-
- **Google Models**
- * `Gemini 1.5 Flash`
- * `Gemini 1.5 Pro`
- * `Gemini 2.0 Pro`
- * `Gemini 2.5 Pro`
-
- **Llama Models**
- * `Llama 3.1 405B`
- * `Llama 3.3 70B`
- * `Llama 4 Scout`
-
- **Mistral Models**
- * `Mistral Large`
- * `Mistral Nemo`
- * `Mixtral 8x22B`
-
- **Other Models**
- * `Deepseek R1`
- * `Qwen Max`
- * `Grok 3`
-
- ---
-
- ### YEPCHAT
-
- Access Yep.com's models through an OpenAI-compatible interface.
-
- **Available Models:**
-
- * `DeepSeek-R1-Distill-Qwen-32B`
- * `Mixtral-8x7B-Instruct-v0.1`
-
- ---
-
- ### HeckAI
-
- Access HeckAI's models through an OpenAI-compatible interface.
-
- **Available Models:**
-
- * `deepseek/deepseek-chat`
- * `openai/gpt-4o-mini`
- * `deepseek/deepseek-r1`
- * `google/gemini-2.0-flash-001`
-
- ---
-
- ### SonusAI
-
- Access SonusAI's models through an OpenAI-compatible interface.
-
- **Available Models:**
-
- * `pro` - SonusAI's premium model
- * `air` - SonusAI's balanced model
- * `mini` - SonusAI's lightweight model
-
- ---
-
- ### ExaChat
-
- Access ExaChat's multi-provider models through an OpenAI-compatible interface.
-
- **Available Models:**
-
- * ExaAnswer: `exaanswer`
- * Gemini: `gemini-2.0-flash`, `gemini-2.5-pro-exp-03-25`, and more
- * OpenRouter: `deepseek/deepseek-r1:free`, `meta-llama/llama-4-maverick:free`, and more
- * Groq: `llama-3.1-8b-instant`, `qwen-2.5-32b`, and more
- * Cerebras: `llama3.1-8b`, `llama-3.3-70b`
-
- ---
-
- ### Netwrck
-
- Access Netwrck's models through an OpenAI-compatible interface.
-
- **Available Models:**
-
- * `anthropic/claude-3-7-sonnet-20250219`
- * `openai/gpt-4o-mini`
- * `deepseek/deepseek-r1`
- * `deepseek/deepseek-chat`
- * `x-ai/grok-2`
- * `google/gemini-pro-1.5`
- * And more
-
- ---
-
- ### StandardInput
-
- Access Standard Input's chat models through an OpenAI-compatible interface.
-
- **Available Models:**
-
- * `standard-quick`: Standard Input's quick response model
- * `standard-reasoning`: Standard Input's model with reasoning capabilities
-
- ---
-
- ### E2B
-
- Access various models via the E2B Fragments API (fragments.e2b.dev) through an OpenAI-compatible interface. Uses `cloudscraper` to handle potential Cloudflare protection.
-
- **Available Models:**
-
- * `claude-3.7-sonnet`
- * `claude-3.5-sonnet`
- * `claude-3.5-haiku`
- * `o1-mini`, `o3-mini`, `o1`, `o3`
- * `gpt-4.5-preview`, `gpt-4o`
- * `gpt-4.1`, `gpt-4.1-mini`, `gpt-4.1-nano`
- * `gemini-1.5-pro-002`
- * `gemini-2.5-pro-exp-03-25`
- * `gemini-2.0-flash`, `gemini-2.0-flash-lite`, `gemini-2.0-flash-thinking-exp-01-21`
- * `qwen-qwq-32b-preview`
- * `grok-beta`
- * `deepseek-chat`
- * `codestral-2501`
- * `mistral-large-latest`
- * `llama4-maverick-instruct-basic`, `llama4-scout-instruct-basic`
- * `llama-v3p1-405b-instruct`
-
- > **Note:** The underlying API does not support true streaming. `stream=True` simulates streaming by returning the full response. See the sketch below.
-
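- From the caller's perspective the loop behaves like any other streaming example in this README; the full text simply arrives in one piece. A short sketch (assuming the `E2B` class is exported like the other providers here):
-
- ```python
- from webscout.Provider.OPENAI import E2B
-
- client = E2B()
- stream = client.chat.completions.create(
-     model="claude-3.5-haiku",
-     messages=[{"role": "user", "content": "Greet me in one line."}],
-     stream=True,  # simulated: the API returns the full response at once
- )
- for chunk in stream:
-     if chunk.choices[0].delta.content:
-         print(chunk.choices[0].delta.content, end="", flush=True)
- print()
- ```
-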
- ---
-
- ## 💻 Usage Examples
-
- Here are examples of how to use the OpenAI-compatible providers in your code.
-
- ### Basic Usage with DeepInfra
-
- ```python
- from webscout.Provider.OPENAI import DeepInfra
-
- # Initialize the client
- client = DeepInfra()
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
-     model="meta-llama/Meta-Llama-3.1-8B-Instruct",
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Tell me about Python programming."}
-     ],
-     temperature=0.7,
-     max_tokens=500
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Basic Usage with Glider
-
- ```python
- from webscout.Provider.OPENAI import Glider
-
- # Initialize the client
- client = Glider()
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
-     model="chat-llama-3-1-70b",
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Tell me about Python programming."}
-     ],
-     max_tokens=500
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Streaming Responses (Example with DeepInfra)
-
- ```python
- from webscout.Provider.OPENAI import DeepInfra
-
- # Initialize the client
- client = DeepInfra()
-
- # Create a streaming completion
- stream = client.chat.completions.create(
-     model="meta-llama/Meta-Llama-3.1-8B-Instruct",
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Write a short poem about programming."}
-     ],
-     stream=True,
-     temperature=0.7
- )
-
- # Process the streaming response
- for chunk in stream:
-     if chunk.choices[0].delta.content:
-         print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Streaming with Glider
-
- ```python
- from webscout.Provider.OPENAI import Glider
-
- # Initialize the client
- client = Glider()
-
- # Create a streaming completion
- stream = client.chat.completions.create(
-     model="chat-llama-3-1-70b",
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Write a short poem about programming."}
-     ],
-     stream=True
- )
-
- # Process the streaming response
- for chunk in stream:
-     if chunk.choices[0].delta.content:
-         print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Basic Usage with ChatGPTClone
-
- ```python
- from webscout.Provider.OPENAI import ChatGPTClone
-
- # Initialize the client
- client = ChatGPTClone()
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
-     model="gpt-4",
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Tell me about Python programming."}
-     ],
-     temperature=0.7
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Streaming with ChatGPTClone
-
- ```python
- from webscout.Provider.OPENAI import ChatGPTClone
-
- # Initialize the client
- client = ChatGPTClone()
-
- # Create a streaming completion
- stream = client.chat.completions.create(
-     model="gpt-4",
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Write a short poem about programming."}
-     ],
-     stream=True
- )
-
- # Process the streaming response
- for chunk in stream:
-     if chunk.choices[0].delta.content:
-         print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Basic Usage with X0GPT
-
- ```python
- from webscout.Provider.OPENAI import X0GPT
-
- # Initialize the client
- client = X0GPT()
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
-     model="gpt-4", # Model name doesn't matter for X0GPT
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Tell me about Python programming."}
-     ]
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Streaming with X0GPT
-
- ```python
- from webscout.Provider.OPENAI import X0GPT
-
- # Initialize the client
- client = X0GPT()
-
- # Create a streaming completion
- stream = client.chat.completions.create(
-     model="gpt-4", # Model name doesn't matter for X0GPT
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Write a short poem about programming."}
-     ],
-     stream=True
- )
-
- # Process the streaming response
- for chunk in stream:
-     if chunk.choices[0].delta.content:
-         print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Basic Usage with WiseCat
-
- ```python
- from webscout.Provider.OPENAI import WiseCat
-
- # Initialize the client
- client = WiseCat()
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
-     model="chat-model-small",
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Tell me about Python programming."}
-     ]
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Streaming with WiseCat
-
- ```python
- from webscout.Provider.OPENAI import WiseCat
-
- # Initialize the client
- client = WiseCat()
-
- # Create a streaming completion
- stream = client.chat.completions.create(
-     model="chat-model-small",
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Write a short poem about programming."}
-     ],
-     stream=True
- )
-
- # Process the streaming response
- for chunk in stream:
-     if chunk.choices[0].delta.content:
-         print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Basic Usage with Venice
-
- ```python
- from webscout.Provider.OPENAI import Venice
-
- # Initialize the client
- client = Venice(temperature=0.7, top_p=0.9)
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
-     model="mistral-31-24b",
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Tell me about Python programming."}
-     ]
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Streaming with Venice
-
- ```python
- from webscout.Provider.OPENAI import Venice
-
- # Initialize the client
- client = Venice()
-
- # Create a streaming completion
- stream = client.chat.completions.create(
-     model="mistral-31-24b",
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Write a short poem about programming."}
-     ],
-     stream=True
- )
-
- # Process the streaming response
- for chunk in stream:
-     if chunk.choices[0].delta.content:
-         print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Basic Usage with ExaAI
-
- ```python
- from webscout.Provider.OPENAI import ExaAI
-
- # Initialize the client
- client = ExaAI()
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
-     model="O3-Mini",
-     messages=[
-         # Note: ExaAI does not support system messages (they will be removed)
-         {"role": "user", "content": "Hello!"},
-         {"role": "assistant", "content": "Hi there! How can I help you today?"},
-         {"role": "user", "content": "Tell me about Python programming."}
-     ]
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Streaming with ExaAI
-
- ```python
- from webscout.Provider.OPENAI import ExaAI
-
- # Initialize the client
- client = ExaAI()
-
- # Create a streaming completion
- stream = client.chat.completions.create(
-     model="O3-Mini",
-     messages=[
-         # Note: ExaAI does not support system messages (they will be removed)
-         {"role": "user", "content": "Hello!"},
-         {"role": "assistant", "content": "Hi there! How can I help you today?"},
-         {"role": "user", "content": "Write a short poem about programming."}
-     ],
-     stream=True
- )
-
- # Process the streaming response
- for chunk in stream:
-     if chunk.choices[0].delta.content:
-         print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Basic Usage with HeckAI
-
- ```python
- from webscout.Provider.OPENAI import HeckAI
-
- # Initialize the client
- client = HeckAI(language="English")
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
-     model="google/gemini-2.0-flash-001",
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Tell me about Python programming."}
-     ]
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Streaming with HeckAI
-
- ```python
- from webscout.Provider.OPENAI import HeckAI
-
- # Initialize the client
- client = HeckAI()
-
- # Create a streaming completion
- stream = client.chat.completions.create(
-     model="google/gemini-2.0-flash-001",
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Write a short poem about programming."}
-     ],
-     stream=True
- )
-
- # Process the streaming response
- for chunk in stream:
-     if chunk.choices[0].delta.content:
-         print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Basic Usage with TypeGPT
-
- ```python
- from webscout.Provider.OPENAI import TypeGPT
-
- # Initialize the client
- client = TypeGPT()
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
-     model="chatgpt-4o-latest",
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Write a short poem about programming."}
-     ]
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Streaming with TypeGPT
-
- ```python
- from webscout.Provider.OPENAI import TypeGPT
-
- # Initialize the client
- client = TypeGPT()
-
- # Create a streaming completion
- stream = client.chat.completions.create(
-     model="chatgpt-4o-latest",
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Write a short poem about programming."}
-     ],
-     stream=True
- )
-
- # Process the streaming response
- for chunk in stream:
-     if chunk.choices[0].delta.content:
-         print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Basic Usage with SciraChat
-
- ```python
- from webscout.Provider.OPENAI import SciraChat
-
- # Initialize the client
- client = SciraChat()
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
-     model="scira-default",
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Tell me about Python programming."}
-     ]
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Streaming with SciraChat
-
- ```python
- from webscout.Provider.OPENAI import SciraChat
-
- # Initialize the client
- client = SciraChat()
-
- # Create a streaming completion
- stream = client.chat.completions.create(
-     model="scira-default",
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Write a short poem about programming."}
-     ],
-     stream=True
- )
-
- # Process the streaming response
- for chunk in stream:
-     if chunk.choices[0].delta.content:
-         print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Basic Usage with FreeAIChat
-
- ```python
- from webscout.Provider.OPENAI import FreeAIChat
-
- # Initialize the client
- client = FreeAIChat()
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
-     model="GPT 4o",
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Tell me about Python programming."}
-     ]
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Streaming with FreeAIChat
-
- ```python
- from webscout.Provider.OPENAI import FreeAIChat
-
- # Initialize the client
- client = FreeAIChat()
-
- # Create a streaming completion
- stream = client.chat.completions.create(
-     model="GPT 4o",
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Write a short poem about programming."}
-     ],
-     stream=True
- )
-
- # Process the streaming response
- for chunk in stream:
-     if chunk.choices[0].delta.content:
-         print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Basic Usage with LLMChatCo
-
- ```python
- from webscout.Provider.OPENAI import LLMChatCo
-
- # Initialize the client
- client = LLMChatCo()
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
-     model="gemini-flash-2.0", # Default model
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Tell me about Python programming."}
-     ],
-     temperature=0.7
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Streaming with LLMChatCo
-
- ```python
- from webscout.Provider.OPENAI import LLMChatCo
-
- # Initialize the client
- client = LLMChatCo()
-
- # Create a streaming completion
- stream = client.chat.completions.create(
-     model="gemini-flash-2.0",
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Write a short poem about programming."}
-     ],
-     stream=True
- )
-
- # Process the streaming response
- for chunk in stream:
-     if chunk.choices[0].delta.content:
-         print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Basic Usage with YEPCHAT
-
- ```python
- from webscout.Provider.OPENAI import YEPCHAT
-
- # Initialize the client
- client = YEPCHAT()
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
-     model="DeepSeek-R1-Distill-Qwen-32B",
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Tell me about Python programming."}
-     ],
-     temperature=0.7
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Streaming with YEPCHAT
-
- ```python
- from webscout.Provider.OPENAI import YEPCHAT
-
- # Initialize the client
- client = YEPCHAT()
-
- # Create a streaming completion
- stream = client.chat.completions.create(
-     model="Mixtral-8x7B-Instruct-v0.1",
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Write a short poem about programming."}
-     ],
-     stream=True
- )
-
- # Process the streaming response
- for chunk in stream:
-     if chunk.choices[0].delta.content:
-         print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Basic Usage with SonusAI
-
- ```python
- from webscout.Provider.OPENAI import SonusAI
-
- # Initialize the client
- client = SonusAI()
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
-     model="pro", # Choose from 'pro', 'air', or 'mini'
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Tell me about Python programming."}
-     ],
-     reasoning=True # Optional: Enable reasoning mode
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Streaming with SonusAI
-
- ```python
- from webscout.Provider.OPENAI import SonusAI
-
- # Initialize the client
- client = SonusAI(timeout=60)
-
- # Create a streaming completion
- stream = client.chat.completions.create(
-     model="air",
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Write a short poem about programming."}
-     ],
-     stream=True
- )
-
- # Process the streaming response
- for chunk in stream:
-     if chunk.choices[0].delta.content:
-         print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Basic Usage with ExaChat
-
- ```python
- from webscout.Provider.OPENAI import ExaChat
-
- # Initialize the client
- client = ExaChat()
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
-     model="exaanswer", # Choose from many available models
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Tell me about Python programming."}
-     ]
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Using Different ExaChat Providers
-
- ```python
- from webscout.Provider.OPENAI import ExaChat
-
- # Initialize the client
- client = ExaChat(timeout=60)
-
- # Use a Gemini model
- gemini_response = client.chat.completions.create(
-     model="gemini-2.0-flash",
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Explain quantum computing in simple terms."}
-     ]
- )
-
- # Use a Groq model
- groq_response = client.chat.completions.create(
-     model="llama-3.1-8b-instant",
-     messages=[
-         {"role": "user", "content": "Tell me about Python programming."}
-     ]
- )
-
- # Print both responses
- print(gemini_response.choices[0].message.content)
- print(groq_response.choices[0].message.content)
- ```
-
- ### Streaming with Netwrck
-
- ```python
- from webscout.Provider.OPENAI import Netwrck
-
- # Initialize the client
- client = Netwrck(timeout=60)
-
- # Create a streaming completion
- stream = client.chat.completions.create(
-     model="openai/gpt-4o-mini",
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Write a short poem about programming."}
-     ],
-     stream=True
- )
-
- # Process the streaming response
- for chunk in stream:
-     if chunk.choices[0].delta.content:
-         print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ### Basic Usage with StandardInput
-
- ```python
- from webscout.Provider.OPENAI import StandardInput
-
- # Initialize the client
- client = StandardInput()
-
- # Create a completion (non-streaming)
- response = client.chat.completions.create(
-     model="standard-quick",
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Tell me about Python programming."}
-     ]
- )
-
- # Print the response
- print(response.choices[0].message.content)
- ```
-
- ### Streaming with StandardInput
-
- ```python
- from webscout.Provider.OPENAI import StandardInput
-
- # Initialize the client
- client = StandardInput()
-
- # Create a streaming completion
- stream = client.chat.completions.create(
-     model="standard-reasoning",
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Count from 1 to 5."}
-     ],
-     stream=True,
-     enable_reasoning=True # Enable reasoning capabilities
- )
-
- # Process the streaming response
- for chunk in stream:
-     if chunk.choices[0].delta.content:
-         print(chunk.choices[0].delta.content, end="", flush=True)
- print() # Add a newline at the end
- ```
-
- ## 🔄 Response Format
-
- All providers return responses that mimic the OpenAI API structure, ensuring compatibility with tools built for OpenAI.
-
- ### 📝 Non-streaming Response
-
- ```json
- {
-   "id": "chatcmpl-123abc",
-   "object": "chat.completion",
-   "created": 1677858242,
-   "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
-   "usage": {
-     "prompt_tokens": 13,
-     "completion_tokens": 7,
-     "total_tokens": 20
-   },
-   "choices": [
-     {
-       "message": {
-         "role": "assistant",
-         "content": "This is a response from the model."
-       },
-       "finish_reason": "stop",
-       "index": 0
-     }
-   ]
- }
- ```
-
- ### 📱 Streaming Response Chunks
-
- ```json
- {
-   "id": "chatcmpl-123abc",
-   "object": "chat.completion.chunk",
-   "created": 1677858242,
-   "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
-   "choices": [
-     {
-       "delta": {
-         "content": "This "
-       },
-       "finish_reason": null,
-       "index": 0
-     }
-   ]
- }
- ```
-
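- Because both shapes mirror OpenAI's, response handling can be written once and reused with any client in this module. A small sketch of that pattern:
-
- ```python
- def print_completion(client, **kwargs):
-     """Print a completion from any OpenAI-compatible client, streaming or not."""
-     if kwargs.get("stream"):
-         for chunk in client.chat.completions.create(**kwargs):
-             if chunk.choices[0].delta.content:
-                 print(chunk.choices[0].delta.content, end="", flush=True)
-         print()
-     else:
-         response = client.chat.completions.create(**kwargs)
-         print(response.choices[0].message.content)
- ```
-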
- ## 🧩 Architecture
-
- The OpenAI-compatible providers are built on a modular architecture:
-
- * `base.py`: Contains abstract base classes that define the OpenAI-compatible interface
- * `utils.py`: Provides data structures that mimic OpenAI's response format
- * Provider-specific implementations (e.g., `deepinfra.py`): Implement the abstract interfaces for specific providers
-
- This architecture makes it easy to add new providers while maintaining a consistent interface.
-
- ## 📝 Notes
-
- * Some providers may require API keys for full functionality
- * Not all OpenAI features are supported by all providers
- * Response formats are standardized to match OpenAI's format, but the underlying content depends on the specific provider and model
-
1190
- ## 🤝 Contributing
1191
-
1192
- Want to add a new OpenAI-compatible provider? Follow these steps:
1193
-
1194
- 1. Create a new file in the `webscout/Provider/OPENAI` directory
1195
- 2. Implement the `OpenAICompatibleProvider` interface
1196
- 3. Add appropriate tests
1197
- 4. Update this README with information about the new provider
1198
-
1199
- ## 📚 Related Documentation
1200
-
1201
- * [OpenAI API Reference](https://platform.openai.com/docs/api-reference)
1202
- * [DeepInfra Documentation](https://deepinfra.com/docs)
1203
- * [Glider.so Website](https://glider.so/)
1204
- * [ChatGPT Clone Website](https://chatgpt-clone-ten-nu.vercel.app/)
1205
- * [X0GPT Website](https://x0-gpt.devwtf.in/)
1206
- * [WiseCat Website](https://wise-cat-groq.vercel.app/)
1207
- * [Venice AI Website](https://venice.ai/)
1208
- * [ExaAI Website](https://o3minichat.exa.ai/)
1209
- * [TypeGPT Website](https://chat.typegpt.net/)
1210
- * [SciraChat Website](https://scira.ai/)
1211
- * [FreeAIChat Website](https://freeaichatplayground.com/)
1212
- * [LLMChatCo Website](https://llmchat.co/)
1213
- * [Yep.com Website](https://yep.com/)
1214
- * [HeckAI Website](https://heck.ai/)
1215
- * [SonusAI Website](https://chat.sonus.ai/)
1216
- * [ExaChat Website](https://exa-chat.vercel.app/)
1217
- * [Netwrck Website](https://netwrck.com/)
1218
- * [StandardInput Website](https://chat.standard-input.com/)
1219
-
1220
- <div align="center">
1221
- <a href="https://t.me/PyscoutAI"><img alt="Telegram Group" src="https://img.shields.io/badge/Telegram%20Group-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white"></a>
1222
- <a href="https://buymeacoffee.com/oevortex"><img alt="Buy Me A Coffee" src="https://img.shields.io/badge/Buy%20Me%20A%20Coffee-FFDD00?style=for-the-badge&logo=buymeacoffee&logoColor=black"></a>
1223
- </div>
1224
-
1225
- ## Flowith OpenAI-Compatible Provider
1226
-
1227
- This provider allows you to use the Flowith API with an OpenAI-compatible interface. It supports the following models:
1228
-
1229
- - gpt-4.1-mini
1230
- - deepseek-chat
1231
- - deepseek-reasoner
1232
- - claude-3.5-haiku
1233
- - gemini-2.0-flash
1234
- - gemini-2.5-flash
1235
- - grok-3-mini
1236
-
1237
- ### Usage Example
1238
-
1239
- ```python
1240
- from Provider.OPENAI.flowith import Flowith
1241
-
1242
- client = Flowith()
1243
- response = client.chat.completions.create(
1244
- model="gpt-4.1-mini",
1245
- messages=[{"role": "user", "content": "Hello!"}]
1246
- )
1247
- print(response.choices[0].message.content)
1248
- ```
1249
-
1250
- - `AVAILABLE_MODELS` and `models()` are provided for model discovery.
1251
- - The provider is compatible with the OpenAI API interface used in this project.
1252
-
1253
- See the source code for more details and advanced usage.
+ <div align="center">
+   <a href="https://github.com/OEvortex/Webscout">
+     <img src="https://img.shields.io/badge/WebScout-OpenAI%20Compatible%20Providers-4285F4?style=for-the-badge&logo=openai&logoColor=white" alt="WebScout OpenAI Compatible Providers">
+   </a>
+   <br/>
+   <h1>WebScout OpenAI-Compatible Providers</h1>
+   <p><strong>Seamlessly integrate with various AI providers using OpenAI-compatible interfaces</strong></p>
+
+   <p>
+     <img src="https://img.shields.io/badge/Python-3.7+-3776AB?style=flat-square&logo=python&logoColor=white" alt="Python 3.7+">
+     <img src="https://img.shields.io/badge/License-MIT-green?style=flat-square" alt="License: MIT">
+     <img src="https://img.shields.io/badge/PRs-Welcome-brightgreen?style=flat-square" alt="PRs Welcome">
+   </p>
+
+   <p>
+     Access multiple AI providers through a standardized OpenAI-compatible interface, making it easy to switch between providers without changing your code.
+   </p>
+ </div>
+
+ ## 🚀 Overview
+
+ The WebScout OpenAI-Compatible Providers module offers a standardized way to interact with various AI providers using the familiar OpenAI API structure. This makes it easy to:
+
+ * Use the same code structure across different AI providers
+ * Switch between providers without major code changes (see the sketch below)
+ * Leverage the OpenAI ecosystem of tools and libraries with alternative AI providers
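+
+ As a minimal sketch of that portability (assuming both providers are importable from `webscout.client`, as in the usage examples below), switching providers amounts to changing which class you instantiate:
+
+ ```python
+ from webscout.client import DeepInfra, Glider
+
+ # Same call shape for both providers; only the class and model name change.
+ for provider_cls, model in [
+     (DeepInfra, "meta-llama/Meta-Llama-3.1-8B-Instruct"),
+     (Glider, "chat-llama-3-1-70b"),
+ ]:
+     client = provider_cls()
+     response = client.chat.completions.create(
+         model=model,
+         messages=[{"role": "user", "content": "Say hello in one sentence."}],
+     )
+     print(response.choices[0].message.content)
+ ```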
+
+ ## ⚙️ Available Providers
+
+ Currently, the following providers are implemented with OpenAI-compatible interfaces:
+
+ - DeepInfra
+ - Glider
+ - ChatGPTClone
+ - X0GPT
+ - WiseCat
+ - Venice
+ - ExaAI
+ - TypeGPT
+ - SciraChat
+ - LLMChatCo
+ - FreeAIChat
+ - YEPCHAT
+ - HeckAI
+ - SonusAI
+ - ExaChat
+ - Netwrck
+ - StandardInput
+ - Writecream
+ - toolbaz
+ - UncovrAI
+ - OPKFC
+ - TextPollinations
+ - E2B
+ - MultiChatAI
+ - AI4Chat
+ - MCPCore
+ - TypefullyAI
+ - Flowith
+ - ChatSandbox
+ - Cloudflare
+ - NEMOTRON
+ - BLACKBOXAI
+ - Copilot
+ - TwoAI
+ - oivscode
+ - Qwen3
+
+ ---
+
+ ## 💻 Usage Examples
+
+ Here are examples of how to use the OpenAI-compatible providers in your code.
+
+ ### Basic Usage with DeepInfra
+
+ ```python
+ from webscout.client import DeepInfra
+
+ # Initialize the client
+ client = DeepInfra()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="meta-llama/Meta-Llama-3.1-8B-Instruct",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ],
+     temperature=0.7,
+     max_tokens=500
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming Responses (Example with DeepInfra)
+
+ ```python
+ from webscout.client import DeepInfra
+
+ # Initialize the client
+ client = DeepInfra()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="meta-llama/Meta-Llama-3.1-8B-Instruct",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True,
+     temperature=0.7
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print() # Add a newline at the end
+ ```
+
+ ### Basic Usage with Glider
+
+ ```python
+ from webscout.client import Glider
+
+ # Initialize the client
+ client = Glider()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="chat-llama-3-1-70b",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ],
+     max_tokens=500
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with Glider
+
+ ```python
+ from webscout.client import Glider
+
+ # Initialize the client
+ client = Glider()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="chat-llama-3-1-70b",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print() # Add a newline at the end
+ ```
+
+ ### Basic Usage with ChatGPTClone
+
+ ```python
+ from webscout.client import ChatGPTClone
+
+ # Initialize the client
+ client = ChatGPTClone()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="gpt-4",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ],
+     temperature=0.7
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with ChatGPTClone
+
+ ```python
+ from webscout.client import ChatGPTClone
+
+ # Initialize the client
+ client = ChatGPTClone()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="gpt-4",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print() # Add a newline at the end
+ ```
+
+ ### Basic Usage with X0GPT
+
+ ```python
+ from webscout.client import X0GPT
+
+ # Initialize the client
+ client = X0GPT()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="gpt-4", # Model name doesn't matter for X0GPT
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ]
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with X0GPT
+
+ ```python
+ from webscout.client import X0GPT
+
+ # Initialize the client
+ client = X0GPT()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="gpt-4", # Model name doesn't matter for X0GPT
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print() # Add a newline at the end
+ ```
+
+ ### Basic Usage with WiseCat
+
+ ```python
+ from webscout.client import WiseCat
+
+ # Initialize the client
+ client = WiseCat()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="chat-model-small",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ]
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with WiseCat
+
+ ```python
+ from webscout.client import WiseCat
+
+ # Initialize the client
+ client = WiseCat()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="chat-model-small",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print() # Add a newline at the end
+ ```
+
+ ### Basic Usage with Venice
+
+ ```python
+ from webscout.client import Venice
+
+ # Initialize the client
+ client = Venice(temperature=0.7, top_p=0.9)
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="mistral-31-24b",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ]
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with Venice
+
+ ```python
+ from webscout.client import Venice
+
+ # Initialize the client
+ client = Venice()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="mistral-31-24b",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print() # Add a newline at the end
+ ```
+
+ ### Basic Usage with ExaAI
+
+ ```python
+ from webscout.client import ExaAI
+
+ # Initialize the client
+ client = ExaAI()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="O3-Mini",
+     messages=[
+         # Note: ExaAI does not support system messages (they will be removed)
+         {"role": "user", "content": "Hello!"},
+         {"role": "assistant", "content": "Hi there! How can I help you today?"},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ]
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with ExaAI
+
+ ```python
+ from webscout.client import ExaAI
+
+ # Initialize the client
+ client = ExaAI()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="O3-Mini",
+     messages=[
+         # Note: ExaAI does not support system messages (they will be removed)
+         {"role": "user", "content": "Hello!"},
+         {"role": "assistant", "content": "Hi there! How can I help you today?"},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print() # Add a newline at the end
+ ```
+
+ ### Basic Usage with HeckAI
+
+ ```python
+ from webscout.client import HeckAI
+
+ # Initialize the client
+ client = HeckAI(language="English")
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="google/gemini-2.0-flash-001",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ]
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with HeckAI
+
+ ```python
+ from webscout.client import HeckAI
+
+ # Initialize the client
+ client = HeckAI()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="google/gemini-2.0-flash-001",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print() # Add a newline at the end
+ ```
+
+ ### Basic Usage with TypeGPT
+
+ ```python
+ from webscout.client import TypeGPT
+
+ # Initialize the client
+ client = TypeGPT()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="chatgpt-4o-latest",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ]
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with TypeGPT
+
+ ```python
+ from webscout.client import TypeGPT
+
+ # Initialize the client
+ client = TypeGPT()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="chatgpt-4o-latest",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print() # Add a newline at the end
+ ```
+
+ ### Basic Usage with SciraChat
+
+ ```python
+ from webscout.client import SciraChat
+
+ # Initialize the client
+ client = SciraChat()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="scira-default",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ]
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with SciraChat
+
+ ```python
+ from webscout.client import SciraChat
+
+ # Initialize the client
+ client = SciraChat()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="scira-default",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print() # Add a newline at the end
+ ```
+
+ ### Basic Usage with FreeAIChat
+
+ ```python
+ from webscout.client import FreeAIChat
+
+ # Initialize the client
+ client = FreeAIChat()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="GPT 4o",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ]
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with FreeAIChat
+
+ ```python
+ from webscout.client import FreeAIChat
+
+ # Initialize the client
+ client = FreeAIChat()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="GPT 4o",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print() # Add a newline at the end
+ ```
+
+ ### Basic Usage with LLMChatCo
+
+ ```python
+ from webscout.client import LLMChatCo
+
+ # Initialize the client
+ client = LLMChatCo()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="gemini-flash-2.0", # Default model
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ],
+     temperature=0.7
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with LLMChatCo
+
+ ```python
+ from webscout.client import LLMChatCo
+
+ # Initialize the client
+ client = LLMChatCo()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="gemini-flash-2.0",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print() # Add a newline at the end
+ ```
+
+ ### Basic Usage with YEPCHAT
+
+ ```python
+ from webscout.client import YEPCHAT
+
+ # Initialize the client
+ client = YEPCHAT()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="DeepSeek-R1-Distill-Qwen-32B",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ],
+     temperature=0.7
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with YEPCHAT
+
+ ```python
+ from webscout.client import YEPCHAT
+
+ # Initialize the client
+ client = YEPCHAT()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="Mixtral-8x7B-Instruct-v0.1",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print() # Add a newline at the end
+ ```
+
+ ### Basic Usage with SonusAI
+
+ ```python
+ from webscout.client import SonusAI
+
+ # Initialize the client
+ client = SonusAI()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="pro", # Choose from 'pro', 'air', or 'mini'
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ],
+     reasoning=True # Optional: Enable reasoning mode
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with SonusAI
+
+ ```python
+ from webscout.client import SonusAI
+
+ # Initialize the client
+ client = SonusAI(timeout=60)
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="air",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print() # Add a newline at the end
+ ```
+
+ ### Basic Usage with ExaChat
+
+ ```python
+ from webscout.client import ExaChat
+
+ # Initialize the client
+ client = ExaChat()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="exaanswer", # Choose from many available models
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ]
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Using Different ExaChat Providers
+
+ ```python
+ from webscout.client import ExaChat
+
+ # Initialize the client
+ client = ExaChat(timeout=60)
+
+ # Use a Gemini model
+ gemini_response = client.chat.completions.create(
+     model="gemini-2.0-flash",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Explain quantum computing in simple terms."}
+     ]
+ )
+
+ # Use a Groq model
+ groq_response = client.chat.completions.create(
+     model="llama-3.1-8b-instant",
+     messages=[
+         {"role": "user", "content": "Tell me about Python programming."}
+     ]
+ )
+
+ # Print both responses
+ print(gemini_response.choices[0].message.content)
+ print(groq_response.choices[0].message.content)
+ ```
+
+ ### Streaming with Netwrck
+
+ ```python
+ from webscout.client import Netwrck
+
+ # Initialize the client
+ client = Netwrck(timeout=60)
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="openai/gpt-4o-mini",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a short poem about programming."}
+     ],
+     stream=True
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print() # Add a newline at the end
+ ```
+
+ ### Basic Usage with StandardInput
+
+ ```python
+ from webscout.client import StandardInput
+
+ # Initialize the client
+ client = StandardInput()
+
+ # Create a completion (non-streaming)
+ response = client.chat.completions.create(
+     model="standard-quick",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about Python programming."}
+     ]
+ )
+
+ # Print the response
+ print(response.choices[0].message.content)
+ ```
+
+ ### Streaming with StandardInput
+
+ ```python
+ from webscout.client import StandardInput
+
+ # Initialize the client
+ client = StandardInput()
+
+ # Create a streaming completion
+ stream = client.chat.completions.create(
+     model="standard-reasoning",
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Count from 1 to 5."}
+     ],
+     stream=True,
+     enable_reasoning=True # Enable reasoning capabilities
+ )
+
+ # Process the streaming response
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+ print() # Add a newline at the end
+ ```
+
+ ## 🔄 Response Format
+
+ All providers return responses that mimic the OpenAI API structure, ensuring compatibility with tools built for OpenAI.
+
+ ### 📝 Non-streaming Response
+
+ ```json
+ {
+   "id": "chatcmpl-123abc",
+   "object": "chat.completion",
+   "created": 1677858242,
+   "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
+   "usage": {
+     "prompt_tokens": 13,
+     "completion_tokens": 7,
+     "total_tokens": 20
+   },
+   "choices": [
+     {
+       "message": {
+         "role": "assistant",
+         "content": "This is a response from the model."
+       },
+       "finish_reason": "stop",
+       "index": 0
+     }
+   ]
+ }
+ ```
+
+ ### 📱 Streaming Response Chunks
+
+ ```json
+ {
+   "id": "chatcmpl-123abc",
+   "object": "chat.completion.chunk",
+   "created": 1677858242,
+   "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
+   "choices": [
+     {
+       "delta": {
+         "content": "This "
+       },
+       "finish_reason": null,
+       "index": 0
+     }
+   ]
+ }
+ ```
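+
+ When consuming a stream, the complete message is recovered by concatenating the `delta.content` fragments. A minimal sketch, reusing the DeepInfra client and model from the examples above:
+
+ ```python
+ from webscout.client import DeepInfra
+
+ client = DeepInfra()
+ stream = client.chat.completions.create(
+     model="meta-llama/Meta-Llama-3.1-8B-Instruct",
+     messages=[{"role": "user", "content": "Name three Python web frameworks."}],
+     stream=True
+ )
+
+ # Accumulate the streamed fragments into the full reply
+ parts = []
+ for chunk in stream:
+     if chunk.choices[0].delta.content:
+         parts.append(chunk.choices[0].delta.content)
+ full_message = "".join(parts)
+ print(full_message)
+ ```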
+
+ ## 🧩 Architecture
+
+ The OpenAI-compatible providers are built on a modular architecture:
+
+ * `base.py`: Contains abstract base classes that define the OpenAI-compatible interface
+ * `utils.py`: Provides data structures that mimic OpenAI's response format
+ * Provider-specific implementations (e.g., `deepinfra.py`): Implement the abstract interfaces for specific providers
+
+ This architecture makes it easy to add new providers while maintaining a consistent interface; the sketch below illustrates the call-chain layout.
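+
+ As an illustrative sketch of how the `client.chat.completions.create(...)` chain can be built from small namespace objects (class names here are hypothetical, not the actual `base.py` source; consult `base.py` for the real abstract classes):
+
+ ```python
+ # Hypothetical layout; the real contract lives in base.py
+ class Completions:
+     def __init__(self, provider):
+         self._provider = provider
+
+     def create(self, *, model, messages, stream=False, **kwargs):
+         # Delegate to the provider's request logic
+         return self._provider._create_completion(
+             model=model, messages=messages, stream=stream, **kwargs
+         )
+
+ class Chat:
+     def __init__(self, provider):
+         self.completions = Completions(provider)
+
+ class ExampleProvider:
+     def __init__(self):
+         self.chat = Chat(self)
+
+     def _create_completion(self, *, model, messages, stream=False, **kwargs):
+         raise NotImplementedError  # supplied by each concrete provider
+ ```
+
+ With this layout, `ExampleProvider().chat.completions.create(...)` resolves to `_create_completion`, which is the single method a concrete provider has to implement.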
912
+
913
+ ## 📝 Notes
914
+
915
+ * Some providers may require API keys for full functionality
916
+ * Not all OpenAI features are supported by all providers
917
+ * Response formats are standardized to match OpenAI's format, but the underlying content depends on the specific provider and model
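+
+ One concrete instance of the second note, grounded in the ExaAI examples above: ExaAI removes system messages, so instructions that would normally go in a system prompt should be folded into the user turn for that provider.
+
+ ```python
+ from webscout.client import ExaAI
+
+ client = ExaAI()
+ # ExaAI drops system messages, so keep the instructions in the user turn
+ response = client.chat.completions.create(
+     model="O3-Mini",
+     messages=[{"role": "user", "content": "You are a poet. Write two lines about Python."}]
+ )
+ print(response.choices[0].message.content)
+ ```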
+
+ ## 🤝 Contributing
+
+ Want to add a new OpenAI-compatible provider? Follow these steps:
+
+ 1. Create a new file in the `webscout/Provider/OPENAI` directory
+ 2. Implement the `OpenAICompatibleProvider` interface (a starting skeleton is sketched below)
+ 3. Add appropriate tests
+ 4. Update this README with information about the new provider
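+
+ A hedged starting point for step 2. The names below mirror the usage examples in this README, but the exact `OpenAICompatibleProvider` contract should be taken from `base.py` and an existing implementation such as `deepinfra.py`; a real provider would also return the response objects from `utils.py` rather than a plain dict.
+
+ ```python
+ # my_provider.py -- hypothetical skeleton for a new provider
+ import time
+ import uuid
+
+ class MyProvider:  # would subclass OpenAICompatibleProvider from base.py
+     AVAILABLE_MODELS = ["my-model-small", "my-model-large"]
+
+     def _call_backend(self, model, messages):
+         # Placeholder: a real provider would call its HTTP API here
+         return "Hello from MyProvider!"
+
+     def _create_completion(self, *, model, messages, stream=False, **kwargs):
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Unknown model: {model}")
+         text = self._call_backend(model, messages)
+         # Shape the result like the non-streaming response shown above
+         return {
+             "id": f"chatcmpl-{uuid.uuid4().hex[:12]}",
+             "object": "chat.completion",
+             "created": int(time.time()),
+             "model": model,
+             "choices": [{
+                 "message": {"role": "assistant", "content": text},
+                 "finish_reason": "stop",
+                 "index": 0
+             }]
+         }
+ ```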
+
+ ## 📚 Related Documentation
+
+ * [OpenAI API Reference](https://platform.openai.com/docs/api-reference)
+ * [DeepInfra Documentation](https://deepinfra.com/docs)
+ * [Glider.so Website](https://glider.so/)
+ * [ChatGPT Clone Website](https://chatgpt-clone-ten-nu.vercel.app/)
+ * [X0GPT Website](https://x0-gpt.devwtf.in/)
+ * [WiseCat Website](https://wise-cat-groq.vercel.app/)
+ * [Venice AI Website](https://venice.ai/)
+ * [ExaAI Website](https://o3minichat.exa.ai/)
+ * [TypeGPT Website](https://chat.typegpt.net/)
+ * [SciraChat Website](https://scira.ai/)
+ * [FreeAIChat Website](https://freeaichatplayground.com/)
+ * [LLMChatCo Website](https://llmchat.co/)
+ * [Yep.com Website](https://yep.com/)
+ * [HeckAI Website](https://heck.ai/)
+ * [SonusAI Website](https://chat.sonus.ai/)
+ * [ExaChat Website](https://exa-chat.vercel.app/)
+ * [Netwrck Website](https://netwrck.com/)
+ * [StandardInput Website](https://chat.standard-input.com/)
+
+ <div align="center">
+   <a href="https://t.me/PyscoutAI"><img alt="Telegram Group" src="https://img.shields.io/badge/Telegram%20Group-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white"></a>
+   <a href="https://buymeacoffee.com/oevortex"><img alt="Buy Me A Coffee" src="https://img.shields.io/badge/Buy%20Me%20A%20Coffee-FFDD00?style=for-the-badge&logo=buymeacoffee&logoColor=black"></a>
+ </div>