webscout 8.2.7__py3-none-any.whl → 8.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (281)
  1. webscout/AIauto.py +33 -15
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +703 -250
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/Act.md +309 -0
  6. webscout/Extra/GitToolkit/__init__.py +10 -0
  7. webscout/Extra/GitToolkit/gitapi/README.md +110 -0
  8. webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
  9. webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
  10. webscout/Extra/GitToolkit/gitapi/user.py +96 -0
  11. webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
  12. webscout/Extra/YTToolkit/README.md +375 -0
  13. webscout/Extra/YTToolkit/YTdownloader.py +957 -0
  14. webscout/Extra/YTToolkit/__init__.py +3 -0
  15. webscout/Extra/YTToolkit/transcriber.py +476 -0
  16. webscout/Extra/YTToolkit/ytapi/README.md +44 -0
  17. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
  18. webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
  19. webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
  20. webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
  21. webscout/Extra/YTToolkit/ytapi/https.py +88 -0
  22. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
  23. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
  24. webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
  25. webscout/Extra/YTToolkit/ytapi/query.py +40 -0
  26. webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
  27. webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
  28. webscout/Extra/YTToolkit/ytapi/video.py +232 -0
  29. webscout/Extra/__init__.py +7 -0
  30. webscout/Extra/autocoder/__init__.py +9 -0
  31. webscout/Extra/autocoder/autocoder.py +1105 -0
  32. webscout/Extra/autocoder/autocoder_utiles.py +332 -0
  33. webscout/Extra/gguf.md +430 -0
  34. webscout/Extra/gguf.py +684 -0
  35. webscout/Extra/tempmail/README.md +488 -0
  36. webscout/Extra/tempmail/__init__.py +28 -0
  37. webscout/Extra/tempmail/async_utils.py +141 -0
  38. webscout/Extra/tempmail/base.py +161 -0
  39. webscout/Extra/tempmail/cli.py +187 -0
  40. webscout/Extra/tempmail/emailnator.py +84 -0
  41. webscout/Extra/tempmail/mail_tm.py +361 -0
  42. webscout/Extra/tempmail/temp_mail_io.py +292 -0
  43. webscout/Extra/weather.md +281 -0
  44. webscout/Extra/weather.py +194 -0
  45. webscout/Extra/weather_ascii.py +76 -0
  46. webscout/Litlogger/README.md +10 -0
  47. webscout/Litlogger/__init__.py +15 -0
  48. webscout/Litlogger/formats.py +4 -0
  49. webscout/Litlogger/handlers.py +103 -0
  50. webscout/Litlogger/levels.py +13 -0
  51. webscout/Litlogger/logger.py +92 -0
  52. webscout/Provider/AI21.py +177 -0
  53. webscout/Provider/AISEARCH/DeepFind.py +254 -0
  54. webscout/Provider/AISEARCH/Perplexity.py +333 -0
  55. webscout/Provider/AISEARCH/README.md +279 -0
  56. webscout/Provider/AISEARCH/__init__.py +9 -0
  57. webscout/Provider/AISEARCH/felo_search.py +202 -0
  58. webscout/Provider/AISEARCH/genspark_search.py +324 -0
  59. webscout/Provider/AISEARCH/hika_search.py +186 -0
  60. webscout/Provider/AISEARCH/iask_search.py +410 -0
  61. webscout/Provider/AISEARCH/monica_search.py +220 -0
  62. webscout/Provider/AISEARCH/scira_search.py +298 -0
  63. webscout/Provider/AISEARCH/webpilotai_search.py +255 -0
  64. webscout/Provider/Aitopia.py +316 -0
  65. webscout/Provider/AllenAI.py +440 -0
  66. webscout/Provider/Andi.py +228 -0
  67. webscout/Provider/Blackboxai.py +791 -0
  68. webscout/Provider/ChatGPTClone.py +237 -0
  69. webscout/Provider/ChatGPTGratis.py +194 -0
  70. webscout/Provider/ChatSandbox.py +342 -0
  71. webscout/Provider/Cloudflare.py +324 -0
  72. webscout/Provider/Cohere.py +208 -0
  73. webscout/Provider/Deepinfra.py +340 -0
  74. webscout/Provider/ExaAI.py +261 -0
  75. webscout/Provider/ExaChat.py +358 -0
  76. webscout/Provider/Flowith.py +217 -0
  77. webscout/Provider/FreeGemini.py +250 -0
  78. webscout/Provider/Gemini.py +169 -0
  79. webscout/Provider/GithubChat.py +369 -0
  80. webscout/Provider/GizAI.py +295 -0
  81. webscout/Provider/Glider.py +225 -0
  82. webscout/Provider/Groq.py +801 -0
  83. webscout/Provider/HF_space/__init__.py +0 -0
  84. webscout/Provider/HF_space/qwen_qwen2.py +206 -0
  85. webscout/Provider/HeckAI.py +375 -0
  86. webscout/Provider/HuggingFaceChat.py +469 -0
  87. webscout/Provider/Hunyuan.py +283 -0
  88. webscout/Provider/Jadve.py +291 -0
  89. webscout/Provider/Koboldai.py +384 -0
  90. webscout/Provider/LambdaChat.py +411 -0
  91. webscout/Provider/Llama3.py +259 -0
  92. webscout/Provider/MCPCore.py +315 -0
  93. webscout/Provider/Marcus.py +198 -0
  94. webscout/Provider/Nemotron.py +218 -0
  95. webscout/Provider/Netwrck.py +270 -0
  96. webscout/Provider/OLLAMA.py +396 -0
  97. webscout/Provider/OPENAI/BLACKBOXAI.py +766 -0
  98. webscout/Provider/OPENAI/Cloudflare.py +378 -0
  99. webscout/Provider/OPENAI/FreeGemini.py +283 -0
  100. webscout/Provider/OPENAI/NEMOTRON.py +232 -0
  101. webscout/Provider/OPENAI/Qwen3.py +283 -0
  102. webscout/Provider/OPENAI/README.md +952 -0
  103. webscout/Provider/OPENAI/TwoAI.py +357 -0
  104. webscout/Provider/OPENAI/__init__.py +40 -0
  105. webscout/Provider/OPENAI/ai4chat.py +293 -0
  106. webscout/Provider/OPENAI/api.py +969 -0
  107. webscout/Provider/OPENAI/base.py +249 -0
  108. webscout/Provider/OPENAI/c4ai.py +373 -0
  109. webscout/Provider/OPENAI/chatgpt.py +556 -0
  110. webscout/Provider/OPENAI/chatgptclone.py +494 -0
  111. webscout/Provider/OPENAI/chatsandbox.py +173 -0
  112. webscout/Provider/OPENAI/copilot.py +242 -0
  113. webscout/Provider/OPENAI/deepinfra.py +322 -0
  114. webscout/Provider/OPENAI/e2b.py +1414 -0
  115. webscout/Provider/OPENAI/exaai.py +417 -0
  116. webscout/Provider/OPENAI/exachat.py +444 -0
  117. webscout/Provider/OPENAI/flowith.py +162 -0
  118. webscout/Provider/OPENAI/freeaichat.py +359 -0
  119. webscout/Provider/OPENAI/glider.py +326 -0
  120. webscout/Provider/OPENAI/groq.py +364 -0
  121. webscout/Provider/OPENAI/heckai.py +308 -0
  122. webscout/Provider/OPENAI/llmchatco.py +335 -0
  123. webscout/Provider/OPENAI/mcpcore.py +389 -0
  124. webscout/Provider/OPENAI/multichat.py +376 -0
  125. webscout/Provider/OPENAI/netwrck.py +357 -0
  126. webscout/Provider/OPENAI/oivscode.py +287 -0
  127. webscout/Provider/OPENAI/opkfc.py +496 -0
  128. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  129. webscout/Provider/OPENAI/scirachat.py +477 -0
  130. webscout/Provider/OPENAI/sonus.py +304 -0
  131. webscout/Provider/OPENAI/standardinput.py +433 -0
  132. webscout/Provider/OPENAI/textpollinations.py +339 -0
  133. webscout/Provider/OPENAI/toolbaz.py +413 -0
  134. webscout/Provider/OPENAI/typefully.py +355 -0
  135. webscout/Provider/OPENAI/typegpt.py +364 -0
  136. webscout/Provider/OPENAI/uncovrAI.py +463 -0
  137. webscout/Provider/OPENAI/utils.py +318 -0
  138. webscout/Provider/OPENAI/venice.py +431 -0
  139. webscout/Provider/OPENAI/wisecat.py +387 -0
  140. webscout/Provider/OPENAI/writecream.py +163 -0
  141. webscout/Provider/OPENAI/x0gpt.py +365 -0
  142. webscout/Provider/OPENAI/yep.py +382 -0
  143. webscout/Provider/OpenGPT.py +209 -0
  144. webscout/Provider/Openai.py +496 -0
  145. webscout/Provider/PI.py +429 -0
  146. webscout/Provider/Perplexitylabs.py +415 -0
  147. webscout/Provider/QwenLM.py +254 -0
  148. webscout/Provider/Reka.py +214 -0
  149. webscout/Provider/StandardInput.py +290 -0
  150. webscout/Provider/TTI/README.md +82 -0
  151. webscout/Provider/TTI/__init__.py +7 -0
  152. webscout/Provider/TTI/aiarta.py +365 -0
  153. webscout/Provider/TTI/artbit.py +0 -0
  154. webscout/Provider/TTI/base.py +64 -0
  155. webscout/Provider/TTI/fastflux.py +200 -0
  156. webscout/Provider/TTI/magicstudio.py +201 -0
  157. webscout/Provider/TTI/piclumen.py +203 -0
  158. webscout/Provider/TTI/pixelmuse.py +225 -0
  159. webscout/Provider/TTI/pollinations.py +221 -0
  160. webscout/Provider/TTI/utils.py +11 -0
  161. webscout/Provider/TTS/README.md +192 -0
  162. webscout/Provider/TTS/__init__.py +10 -0
  163. webscout/Provider/TTS/base.py +159 -0
  164. webscout/Provider/TTS/deepgram.py +156 -0
  165. webscout/Provider/TTS/elevenlabs.py +111 -0
  166. webscout/Provider/TTS/gesserit.py +128 -0
  167. webscout/Provider/TTS/murfai.py +113 -0
  168. webscout/Provider/TTS/openai_fm.py +129 -0
  169. webscout/Provider/TTS/parler.py +111 -0
  170. webscout/Provider/TTS/speechma.py +580 -0
  171. webscout/Provider/TTS/sthir.py +94 -0
  172. webscout/Provider/TTS/streamElements.py +333 -0
  173. webscout/Provider/TTS/utils.py +280 -0
  174. webscout/Provider/TeachAnything.py +229 -0
  175. webscout/Provider/TextPollinationsAI.py +308 -0
  176. webscout/Provider/TwoAI.py +475 -0
  177. webscout/Provider/TypliAI.py +305 -0
  178. webscout/Provider/UNFINISHED/ChatHub.py +209 -0
  179. webscout/Provider/UNFINISHED/Youchat.py +330 -0
  180. webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
  181. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  182. webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
  183. webscout/Provider/Venice.py +258 -0
  184. webscout/Provider/VercelAI.py +253 -0
  185. webscout/Provider/WiseCat.py +233 -0
  186. webscout/Provider/WrDoChat.py +370 -0
  187. webscout/Provider/Writecream.py +246 -0
  188. webscout/Provider/WritingMate.py +269 -0
  189. webscout/Provider/__init__.py +174 -0
  190. webscout/Provider/ai4chat.py +174 -0
  191. webscout/Provider/akashgpt.py +335 -0
  192. webscout/Provider/asksteve.py +220 -0
  193. webscout/Provider/cerebras.py +290 -0
  194. webscout/Provider/chatglm.py +215 -0
  195. webscout/Provider/cleeai.py +213 -0
  196. webscout/Provider/copilot.py +425 -0
  197. webscout/Provider/elmo.py +283 -0
  198. webscout/Provider/freeaichat.py +285 -0
  199. webscout/Provider/geminiapi.py +208 -0
  200. webscout/Provider/granite.py +235 -0
  201. webscout/Provider/hermes.py +266 -0
  202. webscout/Provider/julius.py +223 -0
  203. webscout/Provider/koala.py +170 -0
  204. webscout/Provider/learnfastai.py +325 -0
  205. webscout/Provider/llama3mitril.py +215 -0
  206. webscout/Provider/llmchat.py +258 -0
  207. webscout/Provider/llmchatco.py +306 -0
  208. webscout/Provider/lmarena.py +198 -0
  209. webscout/Provider/meta.py +801 -0
  210. webscout/Provider/multichat.py +364 -0
  211. webscout/Provider/oivscode.py +309 -0
  212. webscout/Provider/samurai.py +224 -0
  213. webscout/Provider/scira_chat.py +299 -0
  214. webscout/Provider/scnet.py +243 -0
  215. webscout/Provider/searchchat.py +292 -0
  216. webscout/Provider/sonus.py +258 -0
  217. webscout/Provider/talkai.py +194 -0
  218. webscout/Provider/toolbaz.py +353 -0
  219. webscout/Provider/turboseek.py +266 -0
  220. webscout/Provider/typefully.py +202 -0
  221. webscout/Provider/typegpt.py +289 -0
  222. webscout/Provider/uncovr.py +368 -0
  223. webscout/Provider/x0gpt.py +299 -0
  224. webscout/Provider/yep.py +389 -0
  225. webscout/__init__.py +4 -2
  226. webscout/cli.py +3 -28
  227. webscout/client.py +70 -0
  228. webscout/conversation.py +35 -35
  229. webscout/litagent/Readme.md +276 -0
  230. webscout/litagent/__init__.py +29 -0
  231. webscout/litagent/agent.py +455 -0
  232. webscout/litagent/constants.py +60 -0
  233. webscout/litprinter/__init__.py +59 -0
  234. webscout/optimizers.py +419 -419
  235. webscout/scout/README.md +404 -0
  236. webscout/scout/__init__.py +8 -0
  237. webscout/scout/core/__init__.py +7 -0
  238. webscout/scout/core/crawler.py +210 -0
  239. webscout/scout/core/scout.py +607 -0
  240. webscout/scout/core/search_result.py +96 -0
  241. webscout/scout/core/text_analyzer.py +63 -0
  242. webscout/scout/core/text_utils.py +277 -0
  243. webscout/scout/core/web_analyzer.py +52 -0
  244. webscout/scout/element.py +478 -0
  245. webscout/scout/parsers/__init__.py +69 -0
  246. webscout/scout/parsers/html5lib_parser.py +172 -0
  247. webscout/scout/parsers/html_parser.py +236 -0
  248. webscout/scout/parsers/lxml_parser.py +178 -0
  249. webscout/scout/utils.py +37 -0
  250. webscout/swiftcli/Readme.md +323 -0
  251. webscout/swiftcli/__init__.py +95 -0
  252. webscout/swiftcli/core/__init__.py +7 -0
  253. webscout/swiftcli/core/cli.py +297 -0
  254. webscout/swiftcli/core/context.py +104 -0
  255. webscout/swiftcli/core/group.py +241 -0
  256. webscout/swiftcli/decorators/__init__.py +28 -0
  257. webscout/swiftcli/decorators/command.py +221 -0
  258. webscout/swiftcli/decorators/options.py +220 -0
  259. webscout/swiftcli/decorators/output.py +252 -0
  260. webscout/swiftcli/exceptions.py +21 -0
  261. webscout/swiftcli/plugins/__init__.py +9 -0
  262. webscout/swiftcli/plugins/base.py +135 -0
  263. webscout/swiftcli/plugins/manager.py +269 -0
  264. webscout/swiftcli/utils/__init__.py +59 -0
  265. webscout/swiftcli/utils/formatting.py +252 -0
  266. webscout/swiftcli/utils/parsing.py +267 -0
  267. webscout/version.py +1 -1
  268. webscout/webscout_search.py +2 -182
  269. webscout/webscout_search_async.py +1 -179
  270. webscout/zeroart/README.md +89 -0
  271. webscout/zeroart/__init__.py +135 -0
  272. webscout/zeroart/base.py +66 -0
  273. webscout/zeroart/effects.py +101 -0
  274. webscout/zeroart/fonts.py +1239 -0
  275. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/METADATA +262 -83
  276. webscout-8.2.9.dist-info/RECORD +289 -0
  277. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
  278. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
  279. webscout-8.2.7.dist-info/RECORD +0 -26
  280. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
  281. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,952 @@
1
+ <div align="center">
2
+ <a href="https://github.com/OEvortex/Webscout">
3
+ <img src="https://img.shields.io/badge/WebScout-OpenAI%20Compatible%20Providers-4285F4?style=for-the-badge&logo=openai&logoColor=white" alt="WebScout OpenAI Compatible Providers">
4
+ </a>
5
+ <br/>
6
+ <h1>WebScout OpenAI-Compatible Providers</h1>
7
+ <p><strong>Seamlessly integrate with various AI providers using OpenAI-compatible interfaces</strong></p>
8
+
9
+ <p>
10
+ <img src="https://img.shields.io/badge/Python-3.7+-3776AB?style=flat-square&logo=python&logoColor=white" alt="Python 3.7+">
11
+ <img src="https://img.shields.io/badge/License-MIT-green?style=flat-square" alt="License: MIT">
12
+ <img src="https://img.shields.io/badge/PRs-Welcome-brightgreen?style=flat-square" alt="PRs Welcome">
13
+ </p>
14
+
15
+ <p>
16
+ Access multiple AI providers through a standardized OpenAI-compatible interface, making it easy to switch between providers without changing your code.
17
+ </p>
18
+ </div>
19
+
20
+ ## 🚀 Overview
21
+
22
+ The WebScout OpenAI-Compatible Providers module offers a standardized way to interact with various AI providers using the familiar OpenAI API structure. This makes it easy to:
23
+
24
+ * Use the same code structure across different AI providers
25
+ * Switch between providers without major code changes
26
+ * Leverage the OpenAI ecosystem of tools and libraries with alternative AI providers
27
+
28
+ ## ⚙️ Available Providers
29
+
30
+ Currently, the following providers are implemented with OpenAI-compatible interfaces:
31
+
32
+ - DeepInfra
33
+ - Glider
34
+ - ChatGPTClone
35
+ - X0GPT
36
+ - WiseCat
37
+ - Venice
38
+ - ExaAI
39
+ - TypeGPT
40
+ - SciraChat
41
+ - LLMChatCo
42
+ - FreeAIChat
43
+ - YEPCHAT
44
+ - HeckAI
45
+ - SonusAI
46
+ - ExaChat
47
+ - Netwrck
48
+ - StandardInput
49
+ - Writecream
50
+ - toolbaz
51
+ - UncovrAI
52
+ - OPKFC
53
+ - TextPollinations
54
+ - E2B
55
+ - MultiChatAI
56
+ - AI4Chat
57
+ - MCPCore
58
+ - TypefullyAI
59
+ - Flowith
60
+ - ChatSandbox
61
+ - Cloudflare
62
+ - NEMOTRON
63
+ - BLACKBOXAI
64
+ - Copilot
65
+ - TwoAI
66
+ - oivscode
67
+ - Qwen3
68
+ ---
69
+
70
+
71
+ ## 💻 Usage Examples
72
+
73
+ Here are examples of how to use the OpenAI-compatible providers in your code.
74
+
75
+ ### Basic Usage with DeepInfra
76
+
77
+ ```python
78
+ from webscout.client import DeepInfra
79
+
80
+ # Initialize the client
81
+ client = DeepInfra()
82
+
83
+ # Create a completion (non-streaming)
84
+ response = client.chat.completions.create(
85
+ model="meta-llama/Meta-Llama-3.1-8B-Instruct",
86
+ messages=[
87
+ {"role": "system", "content": "You are a helpful assistant."},
88
+ {"role": "user", "content": "Tell me about Python programming."}
89
+ ],
90
+ temperature=0.7,
91
+ max_tokens=500
92
+ )
93
+
94
+ # Print the response
95
+ print(response.choices[0].message.content)
96
+ ```
97
+
98
+ ### Basic Usage with Glider
99
+
100
+ ```python
101
+ from webscout.client import Glider
102
+
103
+ # Initialize the client
104
+ client = Glider()
105
+
106
+ # Create a completion (non-streaming)
107
+ response = client.chat.completions.create(
108
+ model="chat-llama-3-1-70b",
109
+ messages=[
110
+ {"role": "system", "content": "You are a helpful assistant."},
111
+ {"role": "user", "content": "Tell me about Python programming."}
112
+ ],
113
+ max_tokens=500
114
+ )
115
+
116
+ # Print the response
117
+ print(response.choices[0].message.content)
118
+ ```
119
+
120
+ ### Streaming Responses (Example with DeepInfra)
121
+
122
+ ```python
123
+ from webscout.client import DeepInfra
124
+
125
+ # Initialize the client
126
+ client = DeepInfra()
127
+
128
+ # Create a streaming completion
129
+ stream = client.chat.completions.create(
130
+ model="meta-llama/Meta-Llama-3.1-8B-Instruct",
131
+ messages=[
132
+ {"role": "system", "content": "You are a helpful assistant."},
133
+ {"role": "user", "content": "Write a short poem about programming."}
134
+ ],
135
+ stream=True,
136
+ temperature=0.7
137
+ )
138
+
139
+ # Process the streaming response
140
+ for chunk in stream:
141
+ if chunk.choices[0].delta.content:
142
+ print(chunk.choices[0].delta.content, end="", flush=True)
143
+ print() # Add a newline at the end
144
+ ```
145
+
146
+ ### Streaming with Glider
147
+
148
+ ```python
149
+ from webscout.client import Glider
150
+
151
+ # Initialize the client
152
+ client = Glider()
153
+
154
+ # Create a streaming completion
155
+ stream = client.chat.completions.create(
156
+ model="chat-llama-3-1-70b",
157
+ messages=[
158
+ {"role": "system", "content": "You are a helpful assistant."},
159
+ {"role": "user", "content": "Write a short poem about programming."}
160
+ ],
161
+ stream=True
162
+ )
163
+
164
+ # Process the streaming response
165
+ for chunk in stream:
166
+ if chunk.choices[0].delta.content:
167
+ print(chunk.choices[0].delta.content, end="", flush=True)
168
+ print() # Add a newline at the end
169
+ ```
170
+
171
+ ### Basic Usage with ChatGPTClone
172
+
173
+ ```python
174
+ from webscout.client import ChatGPTClone
175
+
176
+ # Initialize the client
177
+ client = ChatGPTClone()
178
+
179
+ # Create a completion (non-streaming)
180
+ response = client.chat.completions.create(
181
+ model="gpt-4",
182
+ messages=[
183
+ {"role": "system", "content": "You are a helpful assistant."},
184
+ {"role": "user", "content": "Tell me about Python programming."}
185
+ ],
186
+ temperature=0.7
187
+ )
188
+
189
+ # Print the response
190
+ print(response.choices[0].message.content)
191
+ ```
192
+
193
+ ### Streaming with ChatGPTClone
194
+
195
+ ```python
196
+ from webscout.client import ChatGPTClone
197
+
198
+ # Initialize the client
199
+ client = ChatGPTClone()
200
+
201
+ # Create a streaming completion
202
+ stream = client.chat.completions.create(
203
+ model="gpt-4",
204
+ messages=[
205
+ {"role": "system", "content": "You are a helpful assistant."},
206
+ {"role": "user", "content": "Write a short poem about programming."}
207
+ ],
208
+ stream=True
209
+ )
210
+
211
+ # Process the streaming response
212
+ for chunk in stream:
213
+ if chunk.choices[0].delta.content:
214
+ print(chunk.choices[0].delta.content, end="", flush=True)
215
+ print() # Add a newline at the end
216
+ ```
217
+
218
+ ### Basic Usage with X0GPT
219
+
220
+ ```python
221
+ from webscout.client import X0GPT
222
+
223
+ # Initialize the client
224
+ client = X0GPT()
225
+
226
+ # Create a completion (non-streaming)
227
+ response = client.chat.completions.create(
228
+ model="gpt-4", # Model name doesn't matter for X0GPT
229
+ messages=[
230
+ {"role": "system", "content": "You are a helpful assistant."},
231
+ {"role": "user", "content": "Tell me about Python programming."}
232
+ ]
233
+ )
234
+
235
+ # Print the response
236
+ print(response.choices[0].message.content)
237
+ ```
238
+
239
+ ### Streaming with X0GPT
240
+
241
+ ```python
242
+ from webscout.client import X0GPT
243
+
244
+ # Initialize the client
245
+ client = X0GPT()
246
+
247
+ # Create a streaming completion
248
+ stream = client.chat.completions.create(
249
+ model="gpt-4", # Model name doesn't matter for X0GPT
250
+ messages=[
251
+ {"role": "system", "content": "You are a helpful assistant."},
252
+ {"role": "user", "content": "Write a short poem about programming."}
253
+ ],
254
+ stream=True
255
+ )
256
+
257
+ # Process the streaming response
258
+ for chunk in stream:
259
+ if chunk.choices[0].delta.content:
260
+ print(chunk.choices[0].delta.content, end="", flush=True)
261
+ print() # Add a newline at the end
262
+ ```
263
+
264
+ ### Basic Usage with WiseCat
265
+
266
+ ```python
267
+ from webscout.client import WiseCat
268
+
269
+ # Initialize the client
270
+ client = WiseCat()
271
+
272
+ # Create a completion (non-streaming)
273
+ response = client.chat.completions.create(
274
+ model="chat-model-small",
275
+ messages=[
276
+ {"role": "system", "content": "You are a helpful assistant."},
277
+ {"role": "user", "content": "Tell me about Python programming."}
278
+ ]
279
+ )
280
+
281
+ # Print the response
282
+ print(response.choices[0].message.content)
283
+ ```
284
+
285
+ ### Streaming with WiseCat
286
+
287
+ ```python
288
+ from webscout.client import WiseCat
289
+
290
+ # Initialize the client
291
+ client = WiseCat()
292
+
293
+ # Create a streaming completion
294
+ stream = client.chat.completions.create(
295
+ model="chat-model-small",
296
+ messages=[
297
+ {"role": "system", "content": "You are a helpful assistant."},
298
+ {"role": "user", "content": "Write a short poem about programming."}
299
+ ],
300
+ stream=True
301
+ )
302
+
303
+ # Process the streaming response
304
+ for chunk in stream:
305
+ if chunk.choices[0].delta.content:
306
+ print(chunk.choices[0].delta.content, end="", flush=True)
307
+ print() # Add a newline at the end
308
+ ```
309
+
310
+ ### Basic Usage with Venice
311
+
312
+ ```python
313
+ from webscout.client import Venice
314
+
315
+ # Initialize the client
316
+ client = Venice(temperature=0.7, top_p=0.9)
317
+
318
+ # Create a completion (non-streaming)
319
+ response = client.chat.completions.create(
320
+ model="mistral-31-24b",
321
+ messages=[
322
+ {"role": "system", "content": "You are a helpful assistant."},
323
+ {"role": "user", "content": "Tell me about Python programming."}
324
+ ]
325
+ )
326
+
327
+ # Print the response
328
+ print(response.choices[0].message.content)
329
+ ```
330
+
331
+ ### Streaming with Venice
332
+
333
+ ```python
334
+ from webscout.client import Venice
335
+
336
+ # Initialize the client
337
+ client = Venice()
338
+
339
+ # Create a streaming completion
340
+ stream = client.chat.completions.create(
341
+ model="mistral-31-24b",
342
+ messages=[
343
+ {"role": "system", "content": "You are a helpful assistant."},
344
+ {"role": "user", "content": "Write a short poem about programming."}
345
+ ],
346
+ stream=True
347
+ )
348
+
349
+ # Process the streaming response
350
+ for chunk in stream:
351
+ if chunk.choices[0].delta.content:
352
+ print(chunk.choices[0].delta.content, end="", flush=True)
353
+ print() # Add a newline at the end
354
+ ```
355
+
356
+ ### Basic Usage with ExaAI
357
+
358
+ ```python
359
+ from webscout.client import ExaAI
360
+
361
+ # Initialize the client
362
+ client = ExaAI()
363
+
364
+ # Create a completion (non-streaming)
365
+ response = client.chat.completions.create(
366
+ model="O3-Mini",
367
+ messages=[
368
+ # Note: ExaAI does not support system messages (they will be removed)
369
+ {"role": "user", "content": "Hello!"},
370
+ {"role": "assistant", "content": "Hi there! How can I help you today?"},
371
+ {"role": "user", "content": "Tell me about Python programming."}
372
+ ]
373
+ )
374
+
375
+ # Print the response
376
+ print(response.choices[0].message.content)
377
+ ```
378
+
379
+ ### Basic Usage with HeckAI
380
+
381
+ ```python
382
+ from webscout.client import HeckAI
383
+
384
+ # Initialize the client
385
+ client = HeckAI(language="English")
386
+
387
+ # Create a completion (non-streaming)
388
+ response = client.chat.completions.create(
389
+ model="google/gemini-2.0-flash-001",
390
+ messages=[
391
+ {"role": "system", "content": "You are a helpful assistant."},
392
+ {"role": "user", "content": "Tell me about Python programming."}
393
+ ]
394
+ )
395
+
396
+ # Print the response
397
+ print(response.choices[0].message.content)
398
+ ```
399
+
400
+ ### Streaming with HeckAI
401
+
402
+ ```python
403
+ from webscout.client import HeckAI
404
+
405
+ # Initialize the client
406
+ client = HeckAI()
407
+
408
+ # Create a streaming completion
409
+ stream = client.chat.completions.create(
410
+ model="google/gemini-2.0-flash-001",
411
+ messages=[
412
+ {"role": "system", "content": "You are a helpful assistant."},
413
+ {"role": "user", "content": "Write a short poem about programming."}
414
+ ],
415
+ stream=True
416
+ )
417
+
418
+ # Process the streaming response
419
+ for chunk in stream:
420
+ if chunk.choices[0].delta.content:
421
+ print(chunk.choices[0].delta.content, end="", flush=True)
422
+ print() # Add a newline at the end
423
+ ```
424
+
425
+ ### Streaming with ExaAI
426
+
427
+ ```python
428
+ from webscout.client import ExaAI
429
+
430
+ # Initialize the client
431
+ client = ExaAI()
432
+
433
+ # Create a streaming completion
434
+ stream = client.chat.completions.create(
435
+ model="O3-Mini",
436
+ messages=[
437
+ # Note: ExaAI does not support system messages (they will be removed)
438
+ {"role": "user", "content": "Hello!"},
439
+ {"role": "assistant", "content": "Hi there! How can I help you today?"},
440
+ {"role": "user", "content": "Write a short poem about programming."}
441
+ ],
442
+ stream=True
443
+ )
444
+
445
+ # Process the streaming response
446
+ for chunk in stream:
447
+ if chunk.choices[0].delta.content:
448
+ print(chunk.choices[0].delta.content, end="", flush=True)
449
+ print() # Add a newline at the end
450
+ ```
451
+
452
+ ### Basic Usage with TypeGPT
453
+
454
+ ```python
455
+ from webscout.client import TypeGPT
456
+
457
+ # Initialize the client
458
+ client = TypeGPT()
459
+
460
+ # Create a completion (non-streaming)
461
+ response = client.chat.completions.create(
462
+ model="chatgpt-4o-latest",
463
+ messages=[
464
+ {"role": "system", "content": "You are a helpful assistant."},
465
+ {"role": "user", "content": "Write a short poem about programming."}
466
+ ]
467
+ )
468
+
469
+ # Print the response
470
+ print(response.choices[0].message.content)
471
+ ```
472
+
473
+ ### Streaming with TypeGPT
474
+
475
+ ```python
476
+ from webscout.client import TypeGPT
477
+
478
+ # Initialize the client
479
+ client = TypeGPT()
480
+
481
+ # Create a streaming completion
482
+ stream = client.chat.completions.create(
483
+ model="chatgpt-4o-latest",
484
+ messages=[
485
+ {"role": "system", "content": "You are a helpful assistant."},
486
+ {"role": "user", "content": "Write a short poem about programming."}
487
+ ],
488
+ stream=True
489
+ )
490
+
491
+ # Process the streaming response
492
+ for chunk in stream:
493
+ if chunk.choices[0].delta.content:
494
+ print(chunk.choices[0].delta.content, end="", flush=True)
495
+ print() # Add a newline at the end
496
+ ```
497
+
498
+ ### Basic Usage with SciraChat
499
+
500
+ ```python
501
+ from webscout.client import SciraChat
502
+
503
+ # Initialize the client
504
+ client = SciraChat()
505
+
506
+ # Create a completion (non-streaming)
507
+ response = client.chat.completions.create(
508
+ model="scira-default",
509
+ messages=[
510
+ {"role": "system", "content": "You are a helpful assistant."},
511
+ {"role": "user", "content": "Tell me about Python programming."}
512
+ ]
513
+ )
514
+
515
+ # Print the response
516
+ print(response.choices[0].message.content)
517
+ ```
518
+
519
+ ### Streaming with SciraChat
520
+
521
+ ```python
522
+ from webscout.client import SciraChat
523
+
524
+ # Initialize the client
525
+ client = SciraChat()
526
+
527
+ # Create a streaming completion
528
+ stream = client.chat.completions.create(
529
+ model="scira-default",
530
+ messages=[
531
+ {"role": "system", "content": "You are a helpful assistant."},
532
+ {"role": "user", "content": "Write a short poem about programming."}
533
+ ],
534
+ stream=True
535
+ )
536
+
537
+ # Process the streaming response
538
+ for chunk in stream:
539
+ if chunk.choices[0].delta.content:
540
+ print(chunk.choices[0].delta.content, end="", flush=True)
541
+ print() # Add a newline at the end
542
+ ```
543
+
544
+ ### Basic Usage with FreeAIChat
545
+
546
+ ```python
547
+ from webscout.client import FreeAIChat
548
+
549
+ # Initialize the client
550
+ client = FreeAIChat()
551
+
552
+ # Create a completion (non-streaming)
553
+ response = client.chat.completions.create(
554
+ model="GPT 4o",
555
+ messages=[
556
+ {"role": "system", "content": "You are a helpful assistant."},
557
+ {"role": "user", "content": "Tell me about Python programming."}
558
+ ]
559
+ )
560
+
561
+ # Print the response
562
+ print(response.choices[0].message.content)
563
+ ```
564
+
565
+ ### Streaming with FreeAIChat
566
+
567
+ ```python
568
+ from webscout.client import FreeAIChat
569
+
570
+ # Initialize the client
571
+ client = FreeAIChat()
572
+
573
+ # Create a streaming completion
574
+ stream = client.chat.completions.create(
575
+ model="GPT 4o",
576
+ messages=[
577
+ {"role": "system", "content": "You are a helpful assistant."},
578
+ {"role": "user", "content": "Write a short poem about programming."}
579
+ ],
580
+ stream=True
581
+ )
582
+
583
+ # Process the streaming response
584
+ for chunk in stream:
585
+ if chunk.choices[0].delta.content:
586
+ print(chunk.choices[0].delta.content, end="", flush=True)
587
+ print() # Add a newline at the end
588
+ ```
589
+
590
+ ### Basic Usage with LLMChatCo
591
+
592
+ ```python
593
+ from webscout.client import LLMChatCo
594
+
595
+ # Initialize the client
596
+ client = LLMChatCo()
597
+
598
+ # Create a completion (non-streaming)
599
+ response = client.chat.completions.create(
600
+ model="gemini-flash-2.0", # Default model
601
+ messages=[
602
+ {"role": "system", "content": "You are a helpful assistant."},
603
+ {"role": "user", "content": "Tell me about Python programming."}
604
+ ],
605
+ temperature=0.7
606
+ )
607
+
608
+ # Print the response
609
+ print(response.choices[0].message.content)
610
+ ```
611
+
612
+ ### Streaming with LLMChatCo
613
+
614
+ ```python
615
+ from webscout.client import LLMChatCo
616
+
617
+ # Initialize the client
618
+ client = LLMChatCo()
619
+
620
+ # Create a streaming completion
621
+ stream = client.chat.completions.create(
622
+ model="gemini-flash-2.0",
623
+ messages=[
624
+ {"role": "system", "content": "You are a helpful assistant."},
625
+ {"role": "user", "content": "Write a short poem about programming."}
626
+ ],
627
+ stream=True
628
+ )
629
+
630
+ # Process the streaming response
631
+ for chunk in stream:
632
+ if chunk.choices[0].delta.content:
633
+ print(chunk.choices[0].delta.content, end="", flush=True)
634
+ print() # Add a newline at the end
635
+ ```
636
+
637
+ ### Basic Usage with YEPCHAT
638
+
639
+ ```python
640
+ from webscout.client import YEPCHAT
641
+
642
+ # Initialize the client
643
+ client = YEPCHAT()
644
+
645
+ # Create a completion (non-streaming)
646
+ response = client.chat.completions.create(
647
+ model="DeepSeek-R1-Distill-Qwen-32B",
648
+ messages=[
649
+ {"role": "system", "content": "You are a helpful assistant."},
650
+ {"role": "user", "content": "Tell me about Python programming."}
651
+ ],
652
+ temperature=0.7
653
+ )
654
+
655
+ # Print the response
656
+ print(response.choices[0].message.content)
657
+ ```
658
+
659
+ ### Basic Usage with SonusAI
660
+
661
+ ```python
662
+ from webscout.client import SonusAI
663
+
664
+ # Initialize the client
665
+ client = SonusAI()
666
+
667
+ # Create a completion (non-streaming)
668
+ response = client.chat.completions.create(
669
+ model="pro", # Choose from 'pro', 'air', or 'mini'
670
+ messages=[
671
+ {"role": "system", "content": "You are a helpful assistant."},
672
+ {"role": "user", "content": "Tell me about Python programming."}
673
+ ],
674
+ reasoning=True # Optional: Enable reasoning mode
675
+ )
676
+
677
+ # Print the response
678
+ print(response.choices[0].message.content)
679
+ ```
680
+
681
+ ### Streaming with YEPCHAT
682
+
683
+ ```python
684
+ from webscout.client import YEPCHAT
685
+
686
+ # Initialize the client
687
+ client = YEPCHAT()
688
+
689
+ # Create a streaming completion
690
+ stream = client.chat.completions.create(
691
+ model="Mixtral-8x7B-Instruct-v0.1",
692
+ messages=[
693
+ {"role": "system", "content": "You are a helpful assistant."},
694
+ {"role": "user", "content": "Write a short poem about programming."}
695
+ ],
696
+ stream=True
697
+ )
698
+
699
+ # Process the streaming response
700
+ for chunk in stream:
701
+ if chunk.choices[0].delta.content:
702
+ print(chunk.choices[0].delta.content, end="", flush=True)
703
+ print() # Add a newline at the end
704
+ ```
705
+
706
+ ### Streaming with SonusAI
707
+
708
+ ```python
709
+ from webscout.client import SonusAI
710
+
711
+ # Initialize the client
712
+ client = SonusAI(timeout=60)
713
+
714
+ # Create a streaming completion
715
+ stream = client.chat.completions.create(
716
+ model="air",
717
+ messages=[
718
+ {"role": "system", "content": "You are a helpful assistant."},
719
+ {"role": "user", "content": "Write a short poem about programming."}
720
+ ],
721
+ stream=True
722
+ )
723
+
724
+ # Process the streaming response
725
+ for chunk in stream:
726
+ if chunk.choices[0].delta.content:
727
+ print(chunk.choices[0].delta.content, end="", flush=True)
728
+ print() # Add a newline at the end
729
+ ```
730
+
731
+ ### Basic Usage with ExaChat
732
+
733
+ ```python
734
+ from webscout.client import ExaChat
735
+
736
+ # Initialize the client
737
+ client = ExaChat()
738
+
739
+ # Create a completion (non-streaming)
740
+ response = client.chat.completions.create(
741
+ model="exaanswer", # Choose from many available models
742
+ messages=[
743
+ {"role": "system", "content": "You are a helpful assistant."},
744
+ {"role": "user", "content": "Tell me about Python programming."}
745
+ ]
746
+ )
747
+
748
+ # Print the response
749
+ print(response.choices[0].message.content)
750
+ ```
751
+
752
+ ### Using Different ExaChat Providers
753
+
754
+ ```python
755
+ from webscout.client import ExaChat
756
+
757
+ # Initialize the client
758
+ client = ExaChat(timeout=60)
759
+
760
+ # Use a Gemini model
761
+ gemini_response = client.chat.completions.create(
762
+ model="gemini-2.0-flash",
763
+ messages=[
764
+ {"role": "system", "content": "You are a helpful assistant."},
765
+ {"role": "user", "content": "Explain quantum computing in simple terms."}
766
+ ]
767
+ )
768
+
769
+ # Use a Groq model
770
+ groq_response = client.chat.completions.create(
771
+ model="llama-3.1-8b-instant",
772
+ messages=[
773
+ {"role": "user", "content": "Tell me about Python programming."}
774
+ ]
775
+ )
776
+
777
+    # Print the responses
778
+    print(gemini_response.choices[0].message.content)
+    print(groq_response.choices[0].message.content)
779
+ ```
780
+
781
+ ### Streaming with Netwrck
782
+
783
+ ```python
784
+ from webscout.client import Netwrck
785
+
786
+ # Initialize the client
787
+ client = Netwrck(timeout=60)
788
+
789
+ # Create a streaming completion
790
+ stream = client.chat.completions.create(
791
+ model="openai/gpt-4o-mini",
792
+ messages=[
793
+ {"role": "system", "content": "You are a helpful assistant."},
794
+ {"role": "user", "content": "Write a short poem about programming."}
795
+ ],
796
+ stream=True
797
+ )
798
+
799
+ # Process the streaming response
800
+ for chunk in stream:
801
+ if chunk.choices[0].delta.content:
802
+ print(chunk.choices[0].delta.content, end="", flush=True)
803
+ print() # Add a newline at the end
804
+ ```
805
+
806
+ ### Basic Usage with StandardInput
807
+
808
+ ```python
809
+ from webscout.client import StandardInput
810
+
811
+ # Initialize the client
812
+ client = StandardInput()
813
+
814
+ # Create a completion (non-streaming)
815
+ response = client.chat.completions.create(
816
+ model="standard-quick",
817
+ messages=[
818
+ {"role": "system", "content": "You are a helpful assistant."},
819
+ {"role": "user", "content": "Tell me about Python programming."}
820
+ ]
821
+ )
822
+
823
+ # Print the response
824
+ print(response.choices[0].message.content)
825
+ ```
826
+
827
+ ### Streaming with StandardInput
828
+
829
+ ```python
830
+ from webscout.client import StandardInput
831
+
832
+ # Initialize the client
833
+ client = StandardInput()
834
+
835
+ # Create a streaming completion
836
+ stream = client.chat.completions.create(
837
+ model="standard-reasoning",
838
+ messages=[
839
+ {"role": "system", "content": "You are a helpful assistant."},
840
+ {"role": "user", "content": "Count from 1 to 5."}
841
+ ],
842
+ stream=True,
843
+ enable_reasoning=True # Enable reasoning capabilities
844
+ )
845
+
846
+ # Process the streaming response
847
+ for chunk in stream:
848
+ if chunk.choices[0].delta.content:
849
+ print(chunk.choices[0].delta.content, end="", flush=True)
850
+ print() # Add a newline at the end
851
+ ```
852
+
853
+ ## 🔄 Response Format
854
+
855
+ All providers return responses that mimic the OpenAI API structure, ensuring compatibility with tools built for OpenAI.
856
+
857
+ ### 📝 Non-streaming Response
858
+
859
+ ```json
860
+ {
861
+ "id": "chatcmpl-123abc",
862
+ "object": "chat.completion",
863
+ "created": 1677858242,
864
+ "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
865
+ "usage": {
866
+ "prompt_tokens": 13,
867
+ "completion_tokens": 7,
868
+ "total_tokens": 20
869
+ },
870
+ "choices": [
871
+ {
872
+ "message": {
873
+ "role": "assistant",
874
+ "content": "This is a response from the model."
875
+ },
876
+ "finish_reason": "stop",
877
+ "index": 0
878
+ }
879
+ ]
880
+ }
881
+ ```
882
+
883
+ ### 📱 Streaming Response Chunks
884
+
885
+ ```json
886
+ {
887
+ "id": "chatcmpl-123abc",
888
+ "object": "chat.completion.chunk",
889
+ "created": 1677858242,
890
+ "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
891
+ "choices": [
892
+ {
893
+ "delta": {
894
+ "content": "This "
895
+ },
896
+ "finish_reason": null,
897
+ "index": 0
898
+ }
899
+ ]
900
+ }
901
+ ```
902
+
903
+ ## 🧩 Architecture
904
+
905
+ The OpenAI-compatible providers are built on a modular architecture:
906
+
907
+ * `base.py`: Contains abstract base classes that define the OpenAI-compatible interface
908
+ * `utils.py`: Provides data structures that mimic OpenAI's response format
909
+ * Provider-specific implementations (e.g., `deepinfra.py`): Implement the abstract interfaces for specific providers
910
+
911
+ This architecture makes it easy to add new providers while maintaining a consistent interface.
912
+
913
+ ## 📝 Notes
914
+
915
+ * Some providers may require API keys for full functionality
916
+ * Not all OpenAI features are supported by all providers
917
+ * Response formats are standardized to match OpenAI's format, but the underlying content depends on the specific provider and model
918
+
919
+ ## 🤝 Contributing
920
+
921
+ Want to add a new OpenAI-compatible provider? Follow these steps:
922
+
923
+ 1. Create a new file in the `webscout/Provider/OPENAI` directory
924
+ 2. Implement the `OpenAICompatibleProvider` interface
925
+ 3. Add appropriate tests
926
+ 4. Update this README with information about the new provider
927
+
928
+ ## 📚 Related Documentation
929
+
930
+ * [OpenAI API Reference](https://platform.openai.com/docs/api-reference)
931
+ * [DeepInfra Documentation](https://deepinfra.com/docs)
932
+ * [Glider.so Website](https://glider.so/)
933
+ * [ChatGPT Clone Website](https://chatgpt-clone-ten-nu.vercel.app/)
934
+ * [X0GPT Website](https://x0-gpt.devwtf.in/)
935
+ * [WiseCat Website](https://wise-cat-groq.vercel.app/)
936
+ * [Venice AI Website](https://venice.ai/)
937
+ * [ExaAI Website](https://o3minichat.exa.ai/)
938
+ * [TypeGPT Website](https://chat.typegpt.net/)
939
+ * [SciraChat Website](https://scira.ai/)
940
+ * [FreeAIChat Website](https://freeaichatplayground.com/)
941
+ * [LLMChatCo Website](https://llmchat.co/)
942
+ * [Yep.com Website](https://yep.com/)
943
+ * [HeckAI Website](https://heck.ai/)
944
+ * [SonusAI Website](https://chat.sonus.ai/)
945
+ * [ExaChat Website](https://exa-chat.vercel.app/)
946
+ * [Netwrck Website](https://netwrck.com/)
947
+ * [StandardInput Website](https://chat.standard-input.com/)
948
+
949
+ <div align="center">
950
+ <a href="https://t.me/PyscoutAI"><img alt="Telegram Group" src="https://img.shields.io/badge/Telegram%20Group-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white"></a>
951
+ <a href="https://buymeacoffee.com/oevortex"><img alt="Buy Me A Coffee" src="https://img.shields.io/badge/Buy%20Me%20A%20Coffee-FFDD00?style=for-the-badge&logo=buymeacoffee&logoColor=black"></a>
952
+ </div>