webscout 8.2.6__py3-none-any.whl → 8.2.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (150)
  1. webscout/AIauto.py +1 -1
  2. webscout/AIutel.py +298 -239
  3. webscout/Extra/Act.md +309 -0
  4. webscout/Extra/GitToolkit/gitapi/README.md +110 -0
  5. webscout/Extra/YTToolkit/README.md +375 -0
  6. webscout/Extra/YTToolkit/ytapi/README.md +44 -0
  7. webscout/Extra/YTToolkit/ytapi/extras.py +92 -19
  8. webscout/Extra/autocoder/autocoder.py +309 -114
  9. webscout/Extra/autocoder/autocoder_utiles.py +15 -15
  10. webscout/Extra/gguf.md +430 -0
  11. webscout/Extra/tempmail/README.md +488 -0
  12. webscout/Extra/weather.md +281 -0
  13. webscout/Litlogger/Readme.md +175 -0
  14. webscout/Provider/AISEARCH/DeepFind.py +41 -37
  15. webscout/Provider/AISEARCH/README.md +279 -0
  16. webscout/Provider/AISEARCH/__init__.py +0 -1
  17. webscout/Provider/AISEARCH/genspark_search.py +228 -86
  18. webscout/Provider/AISEARCH/hika_search.py +11 -11
  19. webscout/Provider/AISEARCH/scira_search.py +324 -322
  20. webscout/Provider/AllenAI.py +7 -14
  21. webscout/Provider/Blackboxai.py +518 -74
  22. webscout/Provider/Cloudflare.py +0 -1
  23. webscout/Provider/Deepinfra.py +23 -21
  24. webscout/Provider/Flowith.py +217 -0
  25. webscout/Provider/FreeGemini.py +250 -0
  26. webscout/Provider/GizAI.py +15 -5
  27. webscout/Provider/Glider.py +11 -8
  28. webscout/Provider/HeckAI.py +80 -52
  29. webscout/Provider/Koboldai.py +7 -4
  30. webscout/Provider/LambdaChat.py +2 -2
  31. webscout/Provider/Marcus.py +10 -18
  32. webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
  33. webscout/Provider/OPENAI/Cloudflare.py +378 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +282 -0
  35. webscout/Provider/OPENAI/NEMOTRON.py +244 -0
  36. webscout/Provider/OPENAI/README.md +1253 -0
  37. webscout/Provider/OPENAI/__init__.py +8 -0
  38. webscout/Provider/OPENAI/ai4chat.py +293 -286
  39. webscout/Provider/OPENAI/api.py +810 -0
  40. webscout/Provider/OPENAI/base.py +217 -14
  41. webscout/Provider/OPENAI/c4ai.py +373 -367
  42. webscout/Provider/OPENAI/chatgpt.py +7 -0
  43. webscout/Provider/OPENAI/chatgptclone.py +7 -0
  44. webscout/Provider/OPENAI/chatsandbox.py +172 -0
  45. webscout/Provider/OPENAI/deepinfra.py +30 -20
  46. webscout/Provider/OPENAI/e2b.py +6 -0
  47. webscout/Provider/OPENAI/exaai.py +7 -0
  48. webscout/Provider/OPENAI/exachat.py +6 -0
  49. webscout/Provider/OPENAI/flowith.py +162 -0
  50. webscout/Provider/OPENAI/freeaichat.py +359 -352
  51. webscout/Provider/OPENAI/glider.py +323 -316
  52. webscout/Provider/OPENAI/groq.py +361 -354
  53. webscout/Provider/OPENAI/heckai.py +30 -64
  54. webscout/Provider/OPENAI/llmchatco.py +8 -0
  55. webscout/Provider/OPENAI/mcpcore.py +7 -0
  56. webscout/Provider/OPENAI/multichat.py +8 -0
  57. webscout/Provider/OPENAI/netwrck.py +356 -350
  58. webscout/Provider/OPENAI/opkfc.py +8 -0
  59. webscout/Provider/OPENAI/scirachat.py +471 -462
  60. webscout/Provider/OPENAI/sonus.py +9 -0
  61. webscout/Provider/OPENAI/standardinput.py +9 -1
  62. webscout/Provider/OPENAI/textpollinations.py +339 -329
  63. webscout/Provider/OPENAI/toolbaz.py +7 -0
  64. webscout/Provider/OPENAI/typefully.py +355 -0
  65. webscout/Provider/OPENAI/typegpt.py +358 -346
  66. webscout/Provider/OPENAI/uncovrAI.py +7 -0
  67. webscout/Provider/OPENAI/utils.py +103 -7
  68. webscout/Provider/OPENAI/venice.py +12 -0
  69. webscout/Provider/OPENAI/wisecat.py +19 -19
  70. webscout/Provider/OPENAI/writecream.py +7 -0
  71. webscout/Provider/OPENAI/x0gpt.py +7 -0
  72. webscout/Provider/OPENAI/yep.py +50 -21
  73. webscout/Provider/OpenGPT.py +1 -1
  74. webscout/Provider/TTI/AiForce/README.md +159 -0
  75. webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
  76. webscout/Provider/TTI/ImgSys/README.md +174 -0
  77. webscout/Provider/TTI/MagicStudio/README.md +101 -0
  78. webscout/Provider/TTI/Nexra/README.md +155 -0
  79. webscout/Provider/TTI/PollinationsAI/README.md +146 -0
  80. webscout/Provider/TTI/README.md +128 -0
  81. webscout/Provider/TTI/aiarta/README.md +134 -0
  82. webscout/Provider/TTI/artbit/README.md +100 -0
  83. webscout/Provider/TTI/fastflux/README.md +129 -0
  84. webscout/Provider/TTI/huggingface/README.md +114 -0
  85. webscout/Provider/TTI/piclumen/README.md +161 -0
  86. webscout/Provider/TTI/pixelmuse/README.md +79 -0
  87. webscout/Provider/TTI/talkai/README.md +139 -0
  88. webscout/Provider/TTS/README.md +192 -0
  89. webscout/Provider/TTS/__init__.py +2 -1
  90. webscout/Provider/TTS/speechma.py +500 -100
  91. webscout/Provider/TTS/sthir.py +94 -0
  92. webscout/Provider/TeachAnything.py +3 -7
  93. webscout/Provider/TextPollinationsAI.py +4 -2
  94. webscout/Provider/{aimathgpt.py → UNFINISHED/ChatHub.py} +88 -68
  95. webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
  96. webscout/Provider/UNFINISHED/oivscode.py +351 -0
  97. webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
  98. webscout/Provider/Writecream.py +11 -2
  99. webscout/Provider/__init__.py +8 -14
  100. webscout/Provider/ai4chat.py +4 -58
  101. webscout/Provider/asksteve.py +17 -9
  102. webscout/Provider/cerebras.py +3 -1
  103. webscout/Provider/koala.py +170 -268
  104. webscout/Provider/llmchat.py +3 -0
  105. webscout/Provider/lmarena.py +198 -0
  106. webscout/Provider/meta.py +7 -4
  107. webscout/Provider/samurai.py +223 -0
  108. webscout/Provider/scira_chat.py +4 -2
  109. webscout/Provider/typefully.py +23 -151
  110. webscout/__init__.py +4 -2
  111. webscout/cli.py +3 -28
  112. webscout/conversation.py +35 -35
  113. webscout/litagent/Readme.md +276 -0
  114. webscout/scout/README.md +402 -0
  115. webscout/swiftcli/Readme.md +323 -0
  116. webscout/version.py +1 -1
  117. webscout/webscout_search.py +2 -182
  118. webscout/webscout_search_async.py +1 -179
  119. webscout/zeroart/README.md +89 -0
  120. webscout/zeroart/__init__.py +134 -54
  121. webscout/zeroart/base.py +19 -13
  122. webscout/zeroart/effects.py +101 -99
  123. webscout/zeroart/fonts.py +1239 -816
  124. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/METADATA +116 -74
  125. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/RECORD +130 -103
  126. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
  127. webscout-8.2.8.dist-info/entry_points.txt +3 -0
  128. webscout-8.2.8.dist-info/top_level.txt +1 -0
  129. webscout/Provider/AISEARCH/ISou.py +0 -256
  130. webscout/Provider/ElectronHub.py +0 -773
  131. webscout/Provider/Free2GPT.py +0 -241
  132. webscout/Provider/GPTWeb.py +0 -249
  133. webscout/Provider/bagoodex.py +0 -145
  134. webscout/Provider/geminiprorealtime.py +0 -160
  135. webscout/scout/core.py +0 -881
  136. webscout-8.2.6.dist-info/entry_points.txt +0 -3
  137. webscout-8.2.6.dist-info/top_level.txt +0 -2
  138. webstoken/__init__.py +0 -30
  139. webstoken/classifier.py +0 -189
  140. webstoken/keywords.py +0 -216
  141. webstoken/language.py +0 -128
  142. webstoken/ner.py +0 -164
  143. webstoken/normalizer.py +0 -35
  144. webstoken/processor.py +0 -77
  145. webstoken/sentiment.py +0 -206
  146. webstoken/stemmer.py +0 -73
  147. webstoken/tagger.py +0 -60
  148. webstoken/tokenizer.py +0 -158
  149. /webscout/Provider/{Youchat.py → UNFINISHED/Youchat.py} +0 -0
  150. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
webscout/Provider/OPENAI/README.md (new file, +1253 -0):

<div align="center">
<a href="https://github.com/OEvortex/Webscout">
<img src="https://img.shields.io/badge/WebScout-OpenAI%20Compatible%20Providers-4285F4?style=for-the-badge&logo=openai&logoColor=white" alt="WebScout OpenAI Compatible Providers">
</a>
<br/>
<h1>WebScout OpenAI-Compatible Providers</h1>
<p><strong>Seamlessly integrate with various AI providers using OpenAI-compatible interfaces</strong></p>

<p>
<img src="https://img.shields.io/badge/Python-3.7+-3776AB?style=flat-square&logo=python&logoColor=white" alt="Python 3.7+">
<img src="https://img.shields.io/badge/License-MIT-green?style=flat-square" alt="License: MIT">
<img src="https://img.shields.io/badge/PRs-Welcome-brightgreen?style=flat-square" alt="PRs Welcome">
</p>

<p>
Access multiple AI providers through a standardized OpenAI-compatible interface, making it easy to switch between providers without changing your code.
</p>
</div>

## 🚀 Overview

The WebScout OpenAI-Compatible Providers module offers a standardized way to interact with various AI providers using the familiar OpenAI API structure. This makes it easy to:

* Use the same code structure across different AI providers
* Switch between providers without major code changes (see the sketch below)
* Leverage the OpenAI ecosystem of tools and libraries with alternative AI providers
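
As a minimal sketch of that portability (both client classes and model names are taken from the provider sections below), the same request can be routed through two different providers by swapping only the client class and model name:

```python
from webscout.Provider.OPENAI import DeepInfra, Glider

messages = [{"role": "user", "content": "Say hello in one sentence."}]

# The call shape is identical across providers; only the client class
# and the model name change.
for client, model in [
    (DeepInfra(), "meta-llama/Llama-3.3-70B-Instruct"),
    (Glider(), "chat-llama-3-1-8b"),
]:
    response = client.chat.completions.create(model=model, messages=messages)
    print(response.choices[0].message.content)
```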

## ⚙️ Available Providers

Currently, the following providers are implemented with OpenAI-compatible interfaces:

- DeepInfra
- Glider
- ChatGPTClone
- X0GPT
- WiseCat
- Venice
- ExaAI
- TypeGPT
- SciraChat
- LLMChatCo
- FreeAIChat
- YEPCHAT
- HeckAI
- SonusAI
- ExaChat
- Netwrck
- StandardInput
- Writecream
- toolbaz
- UncovrAI
- OPKFC
- TextPollinations
- E2B
- MultiChatAI
- AI4Chat
- MCPCore
- TypefullyAI
- Flowith
- ChatSandbox
- Cloudflare
- NEMOTRON
- BLACKBOXAI

---

### <img src="https://img.shields.io/badge/DeepInfra-0A0A0A?style=flat-square&logo=data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIyNCIgaGVpZ2h0PSIyNCIgdmlld0JveD0iMCAwIDI0IDI0IiBmaWxsPSJub25lIiBzdHJva2U9IiM1OGE2ZmYiIHN0cm9rZS13aWR0aD0iMiIgc3Ryb2tlLWxpbmVjYXA9InJvdW5kIiBzdHJva2UtbGluZWpvaW49InJvdW5kIj48cGF0aCBkPSJNMjAgMTFhOCA4IDAgMCAwLTE2IDAiPjwvcGF0aD48cGF0aCBkPSJtMTIgMTEgOS0xIj48L3BhdGg+PHBhdGggZD0iTTEyIDExIDMgMTAiPjwvcGF0aD48cGF0aCBkPSJNMTIgMTFWMiI+PC9wYXRoPjxwYXRoIGQ9Ik0xMiAxMXY5Ij48L3BhdGg+PC9zdmc+" alt="" height="20" style="vertical-align: middle; margin-right: 8px;"> DeepInfra

Access DeepInfra's powerful models through an OpenAI-compatible interface.

**Available Models:**

* `deepseek-ai/DeepSeek-V3`
* `google/gemma-2-27b-it`
* `meta-llama/Llama-4-Maverick-17B`
* `meta-llama/Llama-3.3-70B-Instruct`
* `microsoft/phi-4`
* `mistralai/Mistral-Small-24B`
* `Qwen/QwQ-32B`

[View all models →](https://deepinfra.com/models)

---

### <img src="https://img.shields.io/badge/Glider-5C5CFF?style=flat-square&logo=data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIyNCIgaGVpZ2h0PSIyNCIgdmlld0JveD0iMCAwIDI0IDI0IiBmaWxsPSJub25lIiBzdHJva2U9IiNmZmZmZmYiIHN0cm9rZS13aWR0aD0iMiIgc3Ryb2tlLWxpbmVjYXA9InJvdW5kIiBzdHJva2UtbGluZWpvaW49InJvdW5kIj48cGF0aCBkPSJNMTQgMmMxLjgyLjgyIDMgMi41NyAzIDQuNSAwIDMtMy41IDUuNS02LjUgNS41YTQuMzUgNC4zNSAwIDAgMS0yLjUtLjc4QTYgNiAwIDAgMCAxNiAxM2MzLjMxIDAgNi0xLjggNi01LjVDMjIgMy4yIDE5LjMxIDEgMTYgMXoiPjwvcGF0aD48cGF0aCBkPSJNMiAyMi41QzIgMTkuNDYgNS41NSAxNyA5LjUgMTdzNy41IDIuNDYgNy41IDUuNVMxMy40NSAyOCA5LjUgMjggMiAyNS41NCAyIDIyLjV6Ij48L3BhdGg+PHBhdGggZD0iTTExIDE0Yy0xLjgyLS44Mi0zLTIuNTctMy00LjUgMC0zIDMuNS01LjUgNi41LTUuNWE0LjM1IDQuMzUgMCAwIDEgMi41Ljc4QTYgNiAwIDAgMCA5IDRDNS42OSA0IDMgNS44IDMgOS41YzAgMi42OSAyLjY5IDQuOSA2IDUuNXoiPjwvcGF0aD48L3N2Zz4=" alt="" height="20" style="vertical-align: middle; margin-right: 8px;"> Glider

Access Glider.so's models through an OpenAI-compatible interface.

**Available Models:**

* `chat-llama-3-1-70b`
* `chat-llama-3-1-8b`
* `chat-llama-3-2-3b`
* `deepseek-ai/DeepSeek-R1`

---

### <img src="https://img.shields.io/badge/ChatGPTClone-10A37F?style=flat-square&logo=openai&logoColor=white" alt="" height="20" style="vertical-align: middle; margin-right: 8px;"> ChatGPTClone

Access ChatGPT Clone API through an OpenAI-compatible interface.

**Available Models:**

* `gpt-4`
* `gpt-3.5-turbo`

---

### <img src="https://img.shields.io/badge/X0GPT-000000?style=flat-square&logo=x&logoColor=white" alt="" height="20" style="vertical-align: middle; margin-right: 8px;"> X0GPT

Access X0GPT API through an OpenAI-compatible interface.

**Available Models:**

* `gpt-4`
* `gpt-3.5-turbo`

---

### <img src="https://img.shields.io/badge/WiseCat-FF6B6B?style=flat-square&logo=data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIyNCIgaGVpZ2h0PSIyNCIgdmlld0JveD0iMCAwIDI0IDI0IiBmaWxsPSJub25lIiBzdHJva2U9IiNmZmZmZmYiIHN0cm9rZS13aWR0aD0iMiIgc3Ryb2tlLWxpbmVjYXA9InJvdW5kIiBzdHJva2UtbGluZWpvaW49InJvdW5kIj48cGF0aCBkPSJNMTIgMjJjNS41MjMgMCAxMC00LjQ3NyAxMC0xMFMxNy41MjMgMiAxMiAyIDIgNi40NzcgMiAxMnM0LjQ3NyAxMCAxMCAxMHoiPjwvcGF0aD48cGF0aCBkPSJNOCA5aDJhMiAyIDAgMCAwIDIgMnYyYzAgMS4xLjkgMiAyIDJoMiI+PC9wYXRoPjxwYXRoIGQ9Ik0xMCAxNGgtMmEyIDIgMCAwIDEtMi0ydi0yYzAtMS4xLS45LTItMi0ySDIiPjwvcGF0aD48L3N2Zz4=" alt="" height="20" style="vertical-align: middle; margin-right: 8px;"> WiseCat

Access WiseCat API through an OpenAI-compatible interface.

**Available Models:**

* `chat-model-small`
* `chat-model-large`
* `chat-model-reasoning`

---

### <img src="https://img.shields.io/badge/Venice-3498DB?style=flat-square&logo=data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIyNCIgaGVpZ2h0PSIyNCIgdmlld0JveD0iMCAwIDI0IDI0IiBmaWxsPSJub25lIiBzdHJva2U9IiNmZmZmZmYiIHN0cm9rZS13aWR0aD0iMiIgc3Ryb2tlLWxpbmVjYXA9InJvdW5kIiBzdHJva2UtbGluZWpvaW49InJvdW5kIj48cGF0aCBkPSJNMTIgMTlsNy03IDMgMyAtNyA3LTMtM3oiPjwvcGF0aD48cGF0aCBkPSJNMTggMTNsLTEuNS03LjVMMiAybDMuNSAxNC41TDEzIDE4bDUtNXoiPjwvcGF0aD48cGF0aCBkPSJNMiAybDcuNTg2IDcuNTg2Ij48L3BhdGggZD0iTTExIDExbDUgNSI+PC9wYXRoPjwvc3ZnPg==" alt="" height="20" style="vertical-align: middle; margin-right: 8px;"> Venice

Access Venice AI API through an OpenAI-compatible interface.

**Available Models:**

* `mistral-31-24b`
* `llama-3.2-3b-akash`
* `qwen2dot5-coder-32b`
* `deepseek-coder-v2-lite`

---

### <img src="https://img.shields.io/badge/ExaAI-6236FF?style=flat-square&logo=data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIyNCIgaGVpZ2h0PSIyNCIgdmlld0JveD0iMCAwIDI0IDI0IiBmaWxsPSJub25lIiBzdHJva2U9IiNmZmZmZmYiIHN0cm9rZS13aWR0aD0iMiIgc3Ryb2tlLWxpbmVjYXA9InJvdW5kIiBzdHJva2UtbGluZWpvaW49InJvdW5kIj48cGF0aCBkPSJNMTIgMmExMCAxMCAwIDEgMCAwIDIwIDEwIDEwIDAgMCAwIDAtMjB6Ij48L3BhdGg+PHBhdGggZD0iTTEyIDhhNCA0IDAgMSAwIDAgOCA0IDQgMCAwIDAgMC04eiI+PC9wYXRoPjxwYXRoIGQ9Ik0xMiAydjQiPjwvcGF0aCBkPSJNMTIgMTh2NCI+PC9wYXRoPjxwYXRoIGQ9Ik00LjkzIDQuOTNsMyAzIj48L3BhdGggZD0iTTE2LjA3IDE2LjA3bDMgMyI+PC9wYXRoPjxwYXRoIGQ9Ik0yIDEyaDQiPjwvcGF0aD48cGF0aCBkPSJNMTggMTJoNCI+PC9wYXRoPjxwYXRoIGQ9Ik00LjkzIDE5LjA3bDMtMyI+PC9wYXRoPjxwYXRoIGQ9Ik0xNi4wNyA3LjkzbDMtMyI+PC9wYXRoPjwvc3ZnPg==" alt="" height="20" style="vertical-align: middle; margin-right: 8px;"> ExaAI

Access ExaAI's O3-Mini model through an OpenAI-compatible interface.

**Available Models:**

* `O3-Mini`: ExaAI's O3-Mini model

> **Important Note:** ExaAI does not support system messages. Any system messages will be automatically removed from the conversation.

---

### <img src="https://img.shields.io/badge/TypeGPT-4B32C3?style=flat-square&logo=typescript&logoColor=white" alt="" height="20" style="vertical-align: middle; margin-right: 8px;"> TypeGPT

Access TypeGPT.net's models through an OpenAI-compatible interface.

**Available Models:**

* `gpt-4o-mini-2024-07-18`: OpenAI's GPT-4o mini model
* `chatgpt-4o-latest`: Latest version of ChatGPT with GPT-4o
* `deepseek-r1`: DeepSeek's R1 model
* `deepseek-v3`: DeepSeek's V3 model
* `uncensored-r1`: Uncensored version of DeepSeek R1
* `Image-Generator`: For generating images

---

### <img src="https://img.shields.io/badge/SciraChat-FF5700?style=flat-square&logo=data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIyNCIgaGVpZ2h0PSIyNCIgdmlld0JveD0iMCAwIDI0IDI0IiBmaWxsPSJub25lIiBzdHJva2U9IiNmZmZmZmYiIHN0cm9rZS13aWR0aD0iMiIgc3Ryb2tlLWxpbmVjYXA9InJvdW5kIiBzdHJva2UtbGluZWpvaW49InJvdW5kIj48cGF0aCBkPSJNMjEgMTVhMiAyIDAgMCAxLTIgMmgtOWE2IDYgMCAwIDEtNi02VjhoMTBhMiAyIDAgMCAxIDIgMnYyaDRhMiAyIDAgMCAxIDIgMnoiPjwvcGF0aD48cGF0aCBkPSJNMTQgMTFhMiAyIDAgMCAxLTIgMkg0YTIgMiAwIDAgMS0yLTJWN2EyIDIgMCAwIDEgMi0yaDEwYTIgMiAwIDAgMSAyIDJ6Ij48L3BhdGg+PC9zdmc+" alt="" height="20" style="vertical-align: middle; margin-right: 8px;"> SciraChat

Access Scira.ai's models through an OpenAI-compatible interface.

**Available Models:**

* `scira-default`: Grok3 model
* `scira-grok-3-mini`: Grok3-mini (thinking model)
* `scira-vision`: Grok2-Vision (vision model)
* `scira-claude`: Sonnet-3.7 model
* `scira-optimus`: Optimus model

---

### <img src="https://img.shields.io/badge/LLMChatCo-4A90E2?style=flat-square&logo=data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIyNCIgaGVpZ2h0PSIyNCIgdmlld0JveD0iMCAwIDI0IDI0IiBmaWxsPSJub25lIiBzdHJva2U9IiNmZmZmZmYiIHN0cm9rZS13aWR0aD0iMiIgc3Ryb2tlLWxpbmVjYXA9InJvdW5kIiBzdHJva2UtbGluZWpvaW49InJvdW5kIj48cGF0aCBkPSJNMTIgMmExMCAxMCAwIDEgMCAwIDIwIDEwIDEwIDAgMCAwIDAtMjB6Ij48L3BhdGg+PHBhdGggZD0iTTggMTRzMS41IDIgNCAxLjVjMi41LS41IDQtMS41IDQtMS41Ij48L3BhdGg+PHBhdGggZD0iTTkgOWguMDEiPjwvcGF0aD48cGF0aCBkPSJNMTUgOWguMDEiPjwvcGF0aD48L3N2Zz4=" alt="" height="20" style="vertical-align: middle; margin-right: 8px;"> LLMChatCo

Access LLMChat.co's models through an OpenAI-compatible interface.

**Available Models:**

* `gemini-flash-2.0`: Google's Gemini Flash 2.0 model (default)
* `llama-4-scout`: Meta's Llama 4 Scout model
* `gpt-4o-mini`: OpenAI's GPT-4o mini model

---

### <img src="https://img.shields.io/badge/FreeAIChat-00C7B7?style=flat-square&logo=data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIyNCIgaGVpZ2h0PSIyNCIgdmlld0JveD0iMCAwIDI0IDI0IiBmaWxsPSJub25lIiBzdHJva2U9IiNmZmZmZmYiIHN0cm9rZS13aWR0aD0iMiIgc3Ryb2tlLWxpbmVjYXA9InJvdW5kIiBzdHJva2UtbGluZWpvaW49InJvdW5kIj48cGF0aCBkPSJNMjEgMTVhMiAyIDAgMCAxLTIgMkgzYTIgMiAwIDAgMS0yLTJWN2EyIDIgMCAwIDEgMi0yaDEwYTIgMiAwIDAgMSAyIDJ2M2g0YTIgMiAwIDAgMSAyIDJ6Ij48L3BhdGg+PC9zdmc+" alt="" height="20" style="vertical-align: middle; margin-right: 8px;"> FreeAIChat

Access FreeAIChat's wide range of models through an OpenAI-compatible interface.

**Available Models:**

**<img src="https://img.shields.io/badge/OpenAI-412991?style=flat-square&logo=openai&logoColor=white" alt="" height="16" style="vertical-align: middle; margin-right: 5px;"> OpenAI Models**
* `GPT 4o`
* `GPT 4.5 Preview`
* `GPT 4o Latest`
* `O1`
* `O3 Mini`

**<img src="https://img.shields.io/badge/Anthropic-0000FF?style=flat-square&logo=data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIyNCIgaGVpZ2h0PSIyNCIgdmlld0JveD0iMCAwIDI0IDI0IiBmaWxsPSJub25lIiBzdHJva2U9IiNmZmZmZmYiIHN0cm9rZS13aWR0aD0iMiIgc3Ryb2tlLWxpbmVjYXA9InJvdW5kIiBzdHJva2UtbGluZWpvaW49InJvdW5kIj48cGF0aCBkPSJNMTIgMmExMCAxMCAwIDEgMCAwIDIwIDEwIDEwIDAgMCAwIDAtMjB6Ij48L3BhdGg+PHBhdGggZD0iTTEyIDhhNCA0IDAgMSAwIDAgOCA0IDQgMCAwIDAgMC04eiI+PC9wYXRoPjwvc3ZnPg==" alt="" height="16" style="vertical-align: middle; margin-right: 5px;"> Anthropic Models**
* `Claude 3.5 haiku`
* `Claude 3.5 sonnet`
* `Claude 3.7 Sonnet`

**<img src="https://img.shields.io/badge/Google-4285F4?style=flat-square&logo=google&logoColor=white" alt="" height="16" style="vertical-align: middle; margin-right: 5px;"> Google Models**
* `Gemini 1.5 Flash`
* `Gemini 1.5 Pro`
* `Gemini 2.0 Pro`
* `Gemini 2.5 Pro`

**<img src="https://img.shields.io/badge/Llama-FF6B6B?style=flat-square&logo=meta&logoColor=white" alt="" height="16" style="vertical-align: middle; margin-right: 5px;"> Llama Models**
* `Llama 3.1 405B`
* `Llama 3.3 70B`
* `Llama 4 Scout`

**<img src="https://img.shields.io/badge/Mistral-7952B3?style=flat-square&logo=data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIyNCIgaGVpZ2h0PSIyNCIgdmlld0JveD0iMCAwIDI0IDI0IiBmaWxsPSJub25lIiBzdHJva2U9IiNmZmZmZmYiIHN0cm9rZS13aWR0aD0iMiIgc3Ryb2tlLWxpbmVjYXA9InJvdW5kIiBzdHJva2UtbGluZWpvaW49InJvdW5kIj48cGF0aCBkPSJNMTIgMTlsNy03IDMgMyAtNyA3LTMtM3oiPjwvcGF0aD48cGF0aCBkPSJNMTggMTNsLTEuNS03LjVMMiAybDMuNSAxNC41TDEzIDE4bDUtNXoiPjwvcGF0aD48L3N2Zz4=" alt="" height="16" style="vertical-align: middle; margin-right: 5px;"> Mistral Models**
* `Mistral Large`
* `Mistral Nemo`
* `Mixtral 8x22B`

**<img src="https://img.shields.io/badge/Other-34D399?style=flat-square&logo=data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIyNCIgaGVpZ2h0PSIyNCIgdmlld0JveD0iMCAwIDI0IDI0IiBmaWxsPSJub25lIiBzdHJva2U9IiNmZmZmZmYiIHN0cm9rZS13aWR0aD0iMiIgc3Ryb2tlLWxpbmVjYXA9InJvdW5kIiBzdHJva2UtbGluZWpvaW49InJvdW5kIj48cGF0aCBkPSJNMTIgMmE5IDkgMCAwIDEgOSA5IDkgOSAwIDAgMS05IDkgOSA5IDAgMCAxLTkgOSA5IDkgMCAwIDEtOS05eiI+PC9wYXRoPjxwYXRoIGQ9Ik0xMiAyYTkgOSAwIDAgMC05IDkgOSA5IDAgMCAwIDkgOSA5IDkgMCAwIDAgOS05IDkgOSAwIDAgMC05LTl6Ij48L3BhdGg+PHBhdGggZD0iTTEyIDJhOSA5IDAgMCAxIDAgMTggOSA5IDAgMCAxIDAtMTh6Ij48L3BhdGg+PC9zdmc+" alt="" height="16" style="vertical-align: middle; margin-right: 5px;"> Other Models**
* `Deepseek R1`
* `Qwen Max`
* `Grok 3`

---

### <img src="https://img.shields.io/badge/YEPCHAT-FFD700?style=flat-square&logo=data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIyNCIgaGVpZ2h0PSIyNCIgdmlld0JveD0iMCAwIDI0IDI0IiBmaWxsPSJub25lIiBzdHJva2U9IiMwMDAwMDAiIHN0cm9rZS13aWR0aD0iMiIgc3Ryb2tlLWxpbmVjYXA9InJvdW5kIiBzdHJva2UtbGluZWpvaW49InJvdW5kIj48cGF0aCBkPSJNMTIgMmExMCAxMCAwIDEgMCAwIDIwIDEwIDEwIDAgMCAwIDAtMjB6Ij48L3BhdGg+PHBhdGggZD0iTTkgMTZhMyAzIDAgMCAwIDYgMCI+PC9wYXRoPjxwYXRoIGQ9Ik05IDloLjAxIj48L3BhdGg+PHBhdGggZD0iTTE1IDloLjAxIj48L3BhdGg+PC9zdmc+" alt="" height="20" style="vertical-align: middle; margin-right: 8px;"> YEPCHAT

Access Yep.com's models through an OpenAI-compatible interface.

**Available Models:**

* `DeepSeek-R1-Distill-Qwen-32B`
* `Mixtral-8x7B-Instruct-v0.1`

---

### <img src="https://img.shields.io/badge/HeckAI-5D3FD3?style=flat-square&logo=data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIyNCIgaGVpZ2h0PSIyNCIgdmlld0JveD0iMCAwIDI0IDI0IiBmaWxsPSJub25lIiBzdHJva2U9IiNmZmZmZmYiIHN0cm9rZS13aWR0aD0iMiIgc3Ryb2tlLWxpbmVjYXA9InJvdW5kIiBzdHJva2UtbGluZWpvaW49InJvdW5kIj48cGF0aCBkPSJNMTIgMmExMCAxMCAwIDEgMCAwIDIwIDEwIDEwIDAgMCAwIDAtMjB6Ij48L3BhdGg+PHBhdGggZD0iTTggMTRzMS41IDIgNCAxLjVjMi41LS41IDQtMS41IDQtMS41Ij48L3BhdGg+PHBhdGggZD0iTTkgOWguMDEiPjwvcGF0aD48cGF0aCBkPSJNMTUgOWguMDEiPjwvcGF0aD48L3N2Zz4=" alt="" height="20" style="vertical-align: middle; margin-right: 8px;"> HeckAI

Access HeckAI's models through an OpenAI-compatible interface.

**Available Models:**

* `deepseek/deepseek-chat`
* `openai/gpt-4o-mini`
* `deepseek/deepseek-r1`
* `google/gemini-2.0-flash-001`

---

### <img src="https://img.shields.io/badge/SonusAI-00BFFF?style=flat-square&logo=data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIyNCIgaGVpZ2h0PSIyNCIgdmlld0JveD0iMCAwIDI0IDI0IiBmaWxsPSJub25lIiBzdHJva2U9IiNmZmZmZmYiIHN0cm9rZS13aWR0aD0iMiIgc3Ryb2tlLWxpbmVjYXA9InJvdW5kIiBzdHJva2UtbGluZWpvaW49InJvdW5kIj48cGF0aCBkPSJNMTIgMmE5IDkgMCAwIDEgOSA5LjUgOSA5IDAgMCAxLTkgOS41IDkgOSAwIDAgMS05LTkuNUE5IDkgMCAwIDEgMTIgMnoiPjwvcGF0aD48cGF0aCBkPSJNOCAxNGEzIDMgMCAwIDAgNiAwIj48L3BhdGg+PHBhdGggZD0iTTkgOWguMDEiPjwvcGF0aD48cGF0aCBkPSJNMTUgOWguMDEiPjwvcGF0aD48L3N2Zz4=" alt="" height="20" style="vertical-align: middle; margin-right: 8px;"> SonusAI

Access SonusAI's models through an OpenAI-compatible interface.

**Available Models:**

* `pro` - SonusAI's premium model
* `air` - SonusAI's balanced model
* `mini` - SonusAI's lightweight model

---

### <img src="https://img.shields.io/badge/ExaChat-4B0082?style=flat-square&logo=data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIyNCIgaGVpZ2h0PSIyNCIgdmlld0JveD0iMCAwIDI0IDI0IiBmaWxsPSJub25lIiBzdHJva2U9IiNmZmZmZmYiIHN0cm9rZS13aWR0aD0iMiIgc3Ryb2tlLWxpbmVjYXA9InJvdW5kIiBzdHJva2UtbGluZWpvaW49InJvdW5kIj48cGF0aCBkPSJNMTIgMmExMCAxMCAwIDEgMCAwIDIwIDEwIDEwIDAgMCAwIDAtMjB6Ij48L3BhdGg+PHBhdGggZD0iTTggMTRzMS41IDIgNCAxLjVjMi41LS41IDQtMS41IDQtMS41Ij48L3BhdGg+PHBhdGggZD0iTTkgOWguMDEiPjwvcGF0aD48cGF0aCBkPSJNMTUgOWguMDEiPjwvcGF0aD48L3N2Zz4=" alt="" height="20" style="vertical-align: middle; margin-right: 8px;"> ExaChat

Access ExaChat's multi-provider models through an OpenAI-compatible interface.

**Available Models:**

* ExaAnswer: `exaanswer`
* Gemini: `gemini-2.0-flash`, `gemini-2.5-pro-exp-03-25`, and more
* OpenRouter: `deepseek/deepseek-r1:free`, `meta-llama/llama-4-maverick:free`, and more
* Groq: `llama-3.1-8b-instant`, `qwen-2.5-32b`, and more
* Cerebras: `llama3.1-8b`, `llama-3.3-70b`

---

### <img src="https://img.shields.io/badge/Netwrck-3498DB?style=flat-square&logo=data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIyNCIgaGVpZ2h0PSIyNCIgdmlld0JveD0iMCAwIDI0IDI0IiBmaWxsPSJub25lIiBzdHJva2U9IiNmZmZmZmYiIHN0cm9rZS13aWR0aD0iMiIgc3Ryb2tlLWxpbmVjYXA9InJvdW5kIiBzdHJva2UtbGluZWpvaW49InJvdW5kIj48cGF0aCBkPSJNMTIgMmExMCAxMCAwIDEgMCAwIDIwIDEwIDEwIDAgMCAwIDAtMjB6Ij48L3BhdGg+PHBhdGggZD0iTTggMTRzMS41IDIgNCAxLjVjMi41LS41IDQtMS41IDQtMS41Ij48L3BhdGg+PHBhdGggZD0iTTkgOWguMDEiPjwvcGF0aD48cGF0aCBkPSJNMTUgOWguMDEiPjwvcGF0aD48L3N2Zz4=" alt="" height="20" style="vertical-align: middle; margin-right: 8px;"> Netwrck

Access Netwrck's models through an OpenAI-compatible interface.

**Available Models:**

* `anthropic/claude-3-7-sonnet-20250219`
* `openai/gpt-4o-mini`
* `deepseek/deepseek-r1`
* `deepseek/deepseek-chat`
* `x-ai/grok-2`
* `google/gemini-pro-1.5`
* And more

---

### <img src="https://img.shields.io/badge/StandardInput-4A90E2?style=flat-square&logo=data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIyNCIgaGVpZ2h0PSIyNCIgdmlld0JveD0iMCAwIDI0IDI0IiBmaWxsPSJub25lIiBzdHJva2U9IiNmZmZmZmYiIHN0cm9rZS13aWR0aD0iMiIgc3Ryb2tlLWxpbmVjYXA9InJvdW5kIiBzdHJva2UtbGluZWpvaW49InJvdW5kIj48cGF0aCBkPSJNMjEgMTVhMiAyIDAgMCAxLTIgMkgzYTIgMiAwIDAgMS0yLTJWN2EyIDIgMCAwIDEgMi0yaDEwYTIgMiAwIDAgMSAyIDJ2M2g0YTIgMiAwIDAgMSAyIDJ6Ij48L3BhdGg+PC9zdmc+" alt="" height="20" style="vertical-align: middle; margin-right: 8px;"> StandardInput

Access Standard Input's chat models through an OpenAI-compatible interface.

**Available Models:**

* `standard-quick`: Standard Input's quick response model
* `standard-reasoning`: Standard Input's model with reasoning capabilities

---

### <img src="https://img.shields.io/badge/E2B-FFA500?style=flat-square&logo=data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIyNCIgaGVpZ2h0PSIyNCIgdmlld0JveD0iMCAwIDI0IDI0IiBmaWxsPSJub25lIiBzdHJva2U9IiMwMDAwMDAiIHN0cm9rZS13aWR0aD0iMiIgc3Ryb2tlLWxpbmVjYXA9InJvdW5kIiBzdHJva2UtbGluZWpvaW49InJvdW5kIj48cGF0aCBkPSJNMTIgMmExMCAxMCAwIDEgMCAwIDIwIDEwIDEwIDAgMCAwIDAtMjB6Ij48L3BhdGg+PHBhdGggZD0iTTggMTJoOCI+PC9wYXRoPjxwYXRoIGQ9Ik0xMiA4djgiPjwvcGF0aD48L3N2Zz4=" alt="" height="20" style="vertical-align: middle; margin-right: 8px;"> E2B

Access various models via the E2B Fragments API (fragments.e2b.dev) through an OpenAI-compatible interface. Uses `cloudscraper` to handle potential Cloudflare protection.

**Available Models:**

* `claude-3.7-sonnet`
* `claude-3.5-sonnet`
* `claude-3.5-haiku`
* `o1-mini`, `o3-mini`, `o1`, `o3`
* `gpt-4.5-preview`, `gpt-4o`
* `gpt-4.1`, `gpt-4.1-mini`, `gpt-4.1-nano`
* `gemini-1.5-pro-002`
* `gemini-2.5-pro-exp-03-25`
* `gemini-2.0-flash`, `gemini-2.0-flash-lite`, `gemini-2.0-flash-thinking-exp-01-21`
* `qwen-qwq-32b-preview`
* `grok-beta`
* `deepseek-chat`
* `codestral-2501`
* `mistral-large-latest`
* `llama4-maverick-instruct-basic`, `llama4-scout-instruct-basic`
* `llama-v3p1-405b-instruct`

> **Note:** The underlying API does not support true streaming. `stream=True` simulates streaming by returning the full response.
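
A short sketch of what that simulated streaming looks like in practice, using the same call shape as the other providers in this README (the model name is taken from the list above):

```python
from webscout.Provider.OPENAI import E2B

client = E2B()

# stream=True is accepted, but since the upstream API has no true
# streaming, chunks are derived from the already-complete response.
stream = client.chat.completions.create(
    model="claude-3.5-haiku",
    messages=[{"role": "user", "content": "Tell me about Python programming."}],
    stream=True
)

for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()
```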

---

## 💻 Usage Examples

Here are examples of how to use the OpenAI-compatible providers in your code.

### Basic Usage with DeepInfra

```python
from webscout.Provider.OPENAI import DeepInfra

# Initialize the client
client = DeepInfra()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="meta-llama/Meta-Llama-3.1-8B-Instruct",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ],
    temperature=0.7,
    max_tokens=500
)

# Print the response
print(response.choices[0].message.content)
```

### Basic Usage with Glider

```python
from webscout.Provider.OPENAI import Glider

# Initialize the client
client = Glider()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="chat-llama-3-1-70b",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ],
    max_tokens=500
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming Responses (Example with DeepInfra)

```python
from webscout.Provider.OPENAI import DeepInfra

# Initialize the client
client = DeepInfra()

# Create a streaming completion
stream = client.chat.completions.create(
    model="meta-llama/Meta-Llama-3.1-8B-Instruct",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True,
    temperature=0.7
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Streaming with Glider

```python
from webscout.Provider.OPENAI import Glider

# Initialize the client
client = Glider()

# Create a streaming completion
stream = client.chat.completions.create(
    model="chat-llama-3-1-70b",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Basic Usage with ChatGPTClone

```python
from webscout.Provider.OPENAI import ChatGPTClone

# Initialize the client
client = ChatGPTClone()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="gpt-4",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ],
    temperature=0.7
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with ChatGPTClone

```python
from webscout.Provider.OPENAI import ChatGPTClone

# Initialize the client
client = ChatGPTClone()

# Create a streaming completion
stream = client.chat.completions.create(
    model="gpt-4",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Basic Usage with X0GPT

```python
from webscout.Provider.OPENAI import X0GPT

# Initialize the client
client = X0GPT()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="gpt-4",  # Model name doesn't matter for X0GPT
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with X0GPT

```python
from webscout.Provider.OPENAI import X0GPT

# Initialize the client
client = X0GPT()

# Create a streaming completion
stream = client.chat.completions.create(
    model="gpt-4",  # Model name doesn't matter for X0GPT
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Basic Usage with WiseCat

```python
from webscout.Provider.OPENAI import WiseCat

# Initialize the client
client = WiseCat()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="chat-model-small",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with WiseCat

```python
from webscout.Provider.OPENAI import WiseCat

# Initialize the client
client = WiseCat()

# Create a streaming completion
stream = client.chat.completions.create(
    model="chat-model-small",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Basic Usage with Venice

```python
from webscout.Provider.OPENAI import Venice

# Initialize the client
client = Venice(temperature=0.7, top_p=0.9)

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="mistral-31-24b",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with Venice

```python
from webscout.Provider.OPENAI import Venice

# Initialize the client
client = Venice()

# Create a streaming completion
stream = client.chat.completions.create(
    model="mistral-31-24b",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Basic Usage with ExaAI

```python
from webscout.Provider.OPENAI import ExaAI

# Initialize the client
client = ExaAI()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="O3-Mini",
    messages=[
        # Note: ExaAI does not support system messages (they will be removed)
        {"role": "user", "content": "Hello!"},
        {"role": "assistant", "content": "Hi there! How can I help you today?"},
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Basic Usage with HeckAI

```python
from webscout.Provider.OPENAI import HeckAI

# Initialize the client
client = HeckAI(language="English")

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="google/gemini-2.0-flash-001",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with HeckAI

```python
from webscout.Provider.OPENAI import HeckAI

# Initialize the client
client = HeckAI()

# Create a streaming completion
stream = client.chat.completions.create(
    model="google/gemini-2.0-flash-001",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Streaming with ExaAI

```python
from webscout.Provider.OPENAI import ExaAI

# Initialize the client
client = ExaAI()

# Create a streaming completion
stream = client.chat.completions.create(
    model="O3-Mini",
    messages=[
        # Note: ExaAI does not support system messages (they will be removed)
        {"role": "user", "content": "Hello!"},
        {"role": "assistant", "content": "Hi there! How can I help you today?"},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Basic Usage with TypeGPT

```python
from webscout.Provider.OPENAI import TypeGPT

# Initialize the client
client = TypeGPT()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="chatgpt-4o-latest",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with TypeGPT

```python
from webscout.Provider.OPENAI import TypeGPT

# Initialize the client
client = TypeGPT()

# Create a streaming completion
stream = client.chat.completions.create(
    model="chatgpt-4o-latest",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Basic Usage with SciraChat

```python
from webscout.Provider.OPENAI import SciraChat

# Initialize the client
client = SciraChat()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="scira-default",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with SciraChat

```python
from webscout.Provider.OPENAI import SciraChat

# Initialize the client
client = SciraChat()

# Create a streaming completion
stream = client.chat.completions.create(
    model="scira-default",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Basic Usage with FreeAIChat

```python
from webscout.Provider.OPENAI import FreeAIChat

# Initialize the client
client = FreeAIChat()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="GPT 4o",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with FreeAIChat

```python
from webscout.Provider.OPENAI import FreeAIChat

# Initialize the client
client = FreeAIChat()

# Create a streaming completion
stream = client.chat.completions.create(
    model="GPT 4o",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Basic Usage with LLMChatCo

```python
from webscout.Provider.OPENAI import LLMChatCo

# Initialize the client
client = LLMChatCo()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="gemini-flash-2.0",  # Default model
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ],
    temperature=0.7
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with LLMChatCo

```python
from webscout.Provider.OPENAI import LLMChatCo

# Initialize the client
client = LLMChatCo()

# Create a streaming completion
stream = client.chat.completions.create(
    model="gemini-flash-2.0",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Basic Usage with YEPCHAT

```python
from webscout.Provider.OPENAI import YEPCHAT

# Initialize the client
client = YEPCHAT()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="DeepSeek-R1-Distill-Qwen-32B",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ],
    temperature=0.7
)

# Print the response
print(response.choices[0].message.content)
```

### Basic Usage with SonusAI

```python
from webscout.Provider.OPENAI import SonusAI

# Initialize the client
client = SonusAI()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="pro",  # Choose from 'pro', 'air', or 'mini'
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ],
    reasoning=True  # Optional: Enable reasoning mode
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with YEPCHAT

```python
from webscout.Provider.OPENAI import YEPCHAT

# Initialize the client
client = YEPCHAT()

# Create a streaming completion
stream = client.chat.completions.create(
    model="Mixtral-8x7B-Instruct-v0.1",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Streaming with SonusAI

```python
from webscout.Provider.OPENAI import SonusAI

# Initialize the client
client = SonusAI(timeout=60)

# Create a streaming completion
stream = client.chat.completions.create(
    model="air",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Basic Usage with ExaChat

```python
from webscout.Provider.OPENAI import ExaChat

# Initialize the client
client = ExaChat()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="exaanswer",  # Choose from many available models
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Using Different ExaChat Providers

```python
from webscout.Provider.OPENAI import ExaChat

# Initialize the client
client = ExaChat(timeout=60)

# Use a Gemini model
gemini_response = client.chat.completions.create(
    model="gemini-2.0-flash",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Explain quantum computing in simple terms."}
    ]
)

# Use a Groq model
groq_response = client.chat.completions.create(
    model="llama-3.1-8b-instant",
    messages=[
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print both responses
print(gemini_response.choices[0].message.content)
print(groq_response.choices[0].message.content)
```

### Streaming with Netwrck

```python
from webscout.Provider.OPENAI import Netwrck

# Initialize the client
client = Netwrck(timeout=60)

# Create a streaming completion
stream = client.chat.completions.create(
    model="openai/gpt-4o-mini",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a short poem about programming."}
    ],
    stream=True
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

### Basic Usage with StandardInput

```python
from webscout.Provider.OPENAI import StandardInput

# Initialize the client
client = StandardInput()

# Create a completion (non-streaming)
response = client.chat.completions.create(
    model="standard-quick",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me about Python programming."}
    ]
)

# Print the response
print(response.choices[0].message.content)
```

### Streaming with StandardInput

```python
from webscout.Provider.OPENAI import StandardInput

# Initialize the client
client = StandardInput()

# Create a streaming completion
stream = client.chat.completions.create(
    model="standard-reasoning",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Count from 1 to 5."}
    ],
    stream=True,
    enable_reasoning=True  # Enable reasoning capabilities
)

# Process the streaming response
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()  # Add a newline at the end
```

## 🔄 Response Format

All providers return responses that mimic the OpenAI API structure, ensuring compatibility with tools built for OpenAI.

### 📝 Non-streaming Response

```json
{
  "id": "chatcmpl-123abc",
  "object": "chat.completion",
  "created": 1677858242,
  "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
  "usage": {
    "prompt_tokens": 13,
    "completion_tokens": 7,
    "total_tokens": 20
  },
  "choices": [
    {
      "message": {
        "role": "assistant",
        "content": "This is a response from the model."
      },
      "finish_reason": "stop",
      "index": 0
    }
  ]
}
```

### 📱 Streaming Response Chunks

```json
{
  "id": "chatcmpl-123abc",
  "object": "chat.completion.chunk",
  "created": 1677858242,
  "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
  "choices": [
    {
      "delta": {
        "content": "This "
      },
      "finish_reason": null,
      "index": 0
    }
  ]
}
```
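
Because the fields mirror OpenAI's layout, inspecting a completion is straightforward. A small sketch (attribute access on `usage` and `finish_reason` is an assumption, inferred from the `choices` access used throughout this README):

```python
from webscout.Provider.OPENAI import DeepInfra

client = DeepInfra()
response = client.chat.completions.create(
    model="meta-llama/Meta-Llama-3.1-8B-Instruct",
    messages=[{"role": "user", "content": "One-line summary of Python?"}]
)

# Field names follow the JSON layout shown above.
print(response.model)
print(response.choices[0].finish_reason)
print(response.usage)  # prompt/completion/total token counts, if reported
```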

## 🧩 Architecture

The OpenAI-compatible providers are built on a modular architecture:

* `base.py`: Contains abstract base classes that define the OpenAI-compatible interface
* `utils.py`: Provides data structures that mimic OpenAI's response format
* Provider-specific implementations (e.g., `deepinfra.py`): Implement the abstract interfaces for specific providers

This architecture makes it easy to add new providers while maintaining a consistent interface.
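
As a rough, hypothetical sketch of how those pieces fit together (the real method names and helpers live in `base.py` and `utils.py`; the module path and attributes below are illustrative assumptions, not the actual API):

```python
# Hypothetical provider skeleton -- the base class name comes from the
# Contributing section below; check base.py and utils.py for the real API.
from webscout.Provider.OPENAI.base import OpenAICompatibleProvider


class MyProvider(OpenAICompatibleProvider):
    """A new backend exposing client.chat.completions.create(...)."""

    AVAILABLE_MODELS = ["my-model-small", "my-model-large"]

    # The provider-specific work is implementing the completion call:
    # send the request to the upstream HTTP API, then wrap the result in
    # the OpenAI-style response objects defined in utils.py.
```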

## 📝 Notes

* Some providers may require API keys for full functionality
* Not all OpenAI features are supported by all providers
* Response formats are standardized to match OpenAI's format, but the underlying content depends on the specific provider and model

## 🤝 Contributing

Want to add a new OpenAI-compatible provider? Follow these steps:

1. Create a new file in the `webscout/Provider/OPENAI` directory
2. Implement the `OpenAICompatibleProvider` interface
3. Add appropriate tests
4. Update this README with information about the new provider

## 📚 Related Documentation

* [OpenAI API Reference](https://platform.openai.com/docs/api-reference)
* [DeepInfra Documentation](https://deepinfra.com/docs)
* [Glider.so Website](https://glider.so/)
* [ChatGPT Clone Website](https://chatgpt-clone-ten-nu.vercel.app/)
* [X0GPT Website](https://x0-gpt.devwtf.in/)
* [WiseCat Website](https://wise-cat-groq.vercel.app/)
* [Venice AI Website](https://venice.ai/)
* [ExaAI Website](https://o3minichat.exa.ai/)
* [TypeGPT Website](https://chat.typegpt.net/)
* [SciraChat Website](https://scira.ai/)
* [FreeAIChat Website](https://freeaichatplayground.com/)
* [LLMChatCo Website](https://llmchat.co/)
* [Yep.com Website](https://yep.com/)
* [HeckAI Website](https://heck.ai/)
* [SonusAI Website](https://chat.sonus.ai/)
* [ExaChat Website](https://exa-chat.vercel.app/)
* [Netwrck Website](https://netwrck.com/)
* [StandardInput Website](https://chat.standard-input.com/)

<div align="center">
<a href="https://t.me/PyscoutAI"><img alt="Telegram Group" src="https://img.shields.io/badge/Telegram%20Group-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white"></a>
<a href="https://buymeacoffee.com/oevortex"><img alt="Buy Me A Coffee" src="https://img.shields.io/badge/Buy%20Me%20A%20Coffee-FFDD00?style=for-the-badge&logo=buymeacoffee&logoColor=black"></a>
</div>

## Flowith OpenAI-Compatible Provider

This provider allows you to use the Flowith API with an OpenAI-compatible interface. It supports the following models:

- gpt-4.1-mini
- deepseek-chat
- deepseek-reasoner
- claude-3.5-haiku
- gemini-2.0-flash
- gemini-2.5-flash
- grok-3-mini

### Usage Example

```python
from webscout.Provider.OPENAI.flowith import Flowith

client = Flowith()
response = client.chat.completions.create(
    model="gpt-4.1-mini",
    messages=[{"role": "user", "content": "Hello!"}]
)
print(response.choices[0].message.content)
```

- `AVAILABLE_MODELS` and `models()` are provided for model discovery (see the snippet below).
- The provider is compatible with the OpenAI API interface used in this project.
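
A quick sketch of model discovery, assuming `AVAILABLE_MODELS` is a class-level list as in the other providers in this README (`models()` is the documented alternative):

```python
from webscout.Provider.OPENAI.flowith import Flowith

# List the model names this provider accepts
print(Flowith.AVAILABLE_MODELS)
```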

See the source code for more details and advanced usage.