webscout 8.2.8-py3-none-any.whl → 8.3-py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (197)
  1. webscout/AIauto.py +34 -16
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +155 -35
  22. webscout/Provider/ChatSandbox.py +2 -1
  23. webscout/Provider/Deepinfra.py +339 -339
  24. webscout/Provider/ExaChat.py +358 -358
  25. webscout/Provider/Gemini.py +169 -169
  26. webscout/Provider/GithubChat.py +1 -2
  27. webscout/Provider/Glider.py +3 -3
  28. webscout/Provider/HeckAI.py +172 -82
  29. webscout/Provider/LambdaChat.py +1 -0
  30. webscout/Provider/MCPCore.py +7 -3
  31. webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
  32. webscout/Provider/OPENAI/Cloudflare.py +38 -21
  33. webscout/Provider/OPENAI/FalconH1.py +457 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +35 -18
  35. webscout/Provider/OPENAI/NEMOTRON.py +34 -34
  36. webscout/Provider/OPENAI/PI.py +427 -0
  37. webscout/Provider/OPENAI/Qwen3.py +304 -0
  38. webscout/Provider/OPENAI/README.md +952 -1253
  39. webscout/Provider/OPENAI/TwoAI.py +374 -0
  40. webscout/Provider/OPENAI/__init__.py +7 -1
  41. webscout/Provider/OPENAI/ai4chat.py +73 -63
  42. webscout/Provider/OPENAI/api.py +869 -644
  43. webscout/Provider/OPENAI/base.py +2 -0
  44. webscout/Provider/OPENAI/c4ai.py +34 -13
  45. webscout/Provider/OPENAI/chatgpt.py +575 -556
  46. webscout/Provider/OPENAI/chatgptclone.py +512 -487
  47. webscout/Provider/OPENAI/chatsandbox.py +11 -6
  48. webscout/Provider/OPENAI/copilot.py +258 -0
  49. webscout/Provider/OPENAI/deepinfra.py +327 -318
  50. webscout/Provider/OPENAI/e2b.py +140 -104
  51. webscout/Provider/OPENAI/exaai.py +420 -411
  52. webscout/Provider/OPENAI/exachat.py +448 -443
  53. webscout/Provider/OPENAI/flowith.py +7 -3
  54. webscout/Provider/OPENAI/freeaichat.py +12 -8
  55. webscout/Provider/OPENAI/glider.py +15 -8
  56. webscout/Provider/OPENAI/groq.py +5 -2
  57. webscout/Provider/OPENAI/heckai.py +311 -307
  58. webscout/Provider/OPENAI/llmchatco.py +9 -7
  59. webscout/Provider/OPENAI/mcpcore.py +18 -9
  60. webscout/Provider/OPENAI/multichat.py +7 -5
  61. webscout/Provider/OPENAI/netwrck.py +16 -11
  62. webscout/Provider/OPENAI/oivscode.py +290 -0
  63. webscout/Provider/OPENAI/opkfc.py +507 -496
  64. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  65. webscout/Provider/OPENAI/scirachat.py +29 -17
  66. webscout/Provider/OPENAI/sonus.py +308 -303
  67. webscout/Provider/OPENAI/standardinput.py +442 -433
  68. webscout/Provider/OPENAI/textpollinations.py +18 -11
  69. webscout/Provider/OPENAI/toolbaz.py +419 -413
  70. webscout/Provider/OPENAI/typefully.py +17 -10
  71. webscout/Provider/OPENAI/typegpt.py +21 -11
  72. webscout/Provider/OPENAI/uncovrAI.py +477 -462
  73. webscout/Provider/OPENAI/utils.py +90 -79
  74. webscout/Provider/OPENAI/venice.py +435 -425
  75. webscout/Provider/OPENAI/wisecat.py +387 -381
  76. webscout/Provider/OPENAI/writecream.py +166 -163
  77. webscout/Provider/OPENAI/x0gpt.py +26 -37
  78. webscout/Provider/OPENAI/yep.py +384 -356
  79. webscout/Provider/PI.py +2 -1
  80. webscout/Provider/TTI/README.md +55 -101
  81. webscout/Provider/TTI/__init__.py +4 -9
  82. webscout/Provider/TTI/aiarta.py +365 -0
  83. webscout/Provider/TTI/artbit.py +0 -0
  84. webscout/Provider/TTI/base.py +64 -0
  85. webscout/Provider/TTI/fastflux.py +200 -0
  86. webscout/Provider/TTI/magicstudio.py +201 -0
  87. webscout/Provider/TTI/piclumen.py +203 -0
  88. webscout/Provider/TTI/pixelmuse.py +225 -0
  89. webscout/Provider/TTI/pollinations.py +221 -0
  90. webscout/Provider/TTI/utils.py +11 -0
  91. webscout/Provider/TTS/__init__.py +2 -1
  92. webscout/Provider/TTS/base.py +159 -159
  93. webscout/Provider/TTS/openai_fm.py +129 -0
  94. webscout/Provider/TextPollinationsAI.py +308 -308
  95. webscout/Provider/TwoAI.py +239 -44
  96. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  97. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  98. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  99. webscout/Provider/Writecream.py +246 -246
  100. webscout/Provider/__init__.py +2 -2
  101. webscout/Provider/ai4chat.py +33 -8
  102. webscout/Provider/granite.py +41 -6
  103. webscout/Provider/koala.py +169 -169
  104. webscout/Provider/oivscode.py +309 -0
  105. webscout/Provider/samurai.py +3 -2
  106. webscout/Provider/scnet.py +1 -0
  107. webscout/Provider/typegpt.py +3 -3
  108. webscout/Provider/uncovr.py +368 -368
  109. webscout/client.py +70 -0
  110. webscout/litprinter/__init__.py +58 -58
  111. webscout/optimizers.py +419 -419
  112. webscout/scout/README.md +3 -1
  113. webscout/scout/core/crawler.py +134 -64
  114. webscout/scout/core/scout.py +148 -109
  115. webscout/scout/element.py +106 -88
  116. webscout/swiftcli/Readme.md +323 -323
  117. webscout/swiftcli/plugins/manager.py +9 -2
  118. webscout/version.py +1 -1
  119. webscout/zeroart/__init__.py +134 -134
  120. webscout/zeroart/effects.py +100 -100
  121. webscout/zeroart/fonts.py +1238 -1238
  122. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
  123. webscout-8.3.dist-info/RECORD +290 -0
  124. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  125. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
  126. webscout/Litlogger/Readme.md +0 -175
  127. webscout/Litlogger/core/__init__.py +0 -6
  128. webscout/Litlogger/core/level.py +0 -23
  129. webscout/Litlogger/core/logger.py +0 -165
  130. webscout/Litlogger/handlers/__init__.py +0 -12
  131. webscout/Litlogger/handlers/console.py +0 -33
  132. webscout/Litlogger/handlers/file.py +0 -143
  133. webscout/Litlogger/handlers/network.py +0 -173
  134. webscout/Litlogger/styles/__init__.py +0 -7
  135. webscout/Litlogger/styles/colors.py +0 -249
  136. webscout/Litlogger/styles/formats.py +0 -458
  137. webscout/Litlogger/styles/text.py +0 -87
  138. webscout/Litlogger/utils/__init__.py +0 -6
  139. webscout/Litlogger/utils/detectors.py +0 -153
  140. webscout/Litlogger/utils/formatters.py +0 -200
  141. webscout/Provider/ChatGPTGratis.py +0 -194
  142. webscout/Provider/TTI/AiForce/README.md +0 -159
  143. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  144. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  145. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  146. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  147. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  148. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  149. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  150. webscout/Provider/TTI/ImgSys/README.md +0 -174
  151. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  152. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  153. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  154. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  155. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  156. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  157. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  158. webscout/Provider/TTI/Nexra/README.md +0 -155
  159. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  160. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  161. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  162. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  163. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  164. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  165. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  166. webscout/Provider/TTI/aiarta/README.md +0 -134
  167. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  168. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  169. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  170. webscout/Provider/TTI/artbit/README.md +0 -100
  171. webscout/Provider/TTI/artbit/__init__.py +0 -22
  172. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  173. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  174. webscout/Provider/TTI/fastflux/README.md +0 -129
  175. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  176. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  177. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  178. webscout/Provider/TTI/huggingface/README.md +0 -114
  179. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  180. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  181. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  182. webscout/Provider/TTI/piclumen/README.md +0 -161
  183. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  184. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  185. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  186. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  187. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  188. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  189. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  190. webscout/Provider/TTI/talkai/README.md +0 -139
  191. webscout/Provider/TTI/talkai/__init__.py +0 -4
  192. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  193. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  194. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  195. webscout-8.2.8.dist-info/RECORD +0 -334
  196. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  197. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
webscout/Provider/oivscode.py (new file)
@@ -0,0 +1,309 @@
+import secrets
+import requests
+import json
+import random
+import string
+from typing import Union, Any, Dict, Optional, Generator
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider
+from webscout import exceptions
+
+
+class oivscode(Provider):
+    """
+    A class to interact with a test API.
+    """
+
+    AVAILABLE_MODELS = [
+        "*",
+        "Qwen/Qwen2.5-72B-Instruct-Turbo",
+        "Qwen/Qwen2.5-Coder-32B-Instruct",
+        "claude-3-5-sonnet-20240620",
+        "claude-3-5-sonnet-20241022",
+        "claude-3-7-sonnet-20250219",
+        "custom/blackbox-base",
+        "custom/blackbox-pro",
+        "custom/blackbox-pro-designer",
+        "custom/blackbox-pro-plus",
+        "deepseek-r1",
+        "deepseek-v3",
+        "deepseek/deepseek-chat",
+        "gemini-2.5-pro-preview-03-25",
+        "gpt-4o-mini",
+        "grok-3-beta",
+        "image-gen",
+        "llama-4-maverick-17b-128e-instruct-fp8",
+        "o1",
+        "o3-mini",
+        "o4-mini",
+        "transcribe",
+        "anthropic/claude-sonnet-4",
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 1024,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "claude-3-5-sonnet-20240620",
+        system_prompt: str = "You are a helpful AI assistant.",
+    ):
+        """Initializes the oivscode client with the given parameters."""
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoints = [
+            "https://oi-vscode-server.onrender.com/v1/chat/completions",
+            "https://oi-vscode-server-2.onrender.com/v1/chat/completions",
+            "https://oi-vscode-server-5.onrender.com/v1/chat/completions",
+            "https://oi-vscode-server-0501.onrender.com/v1/chat/completions",
+        ]
+        self.api_endpoint = random.choice(self.api_endpoints)
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.system_prompt = system_prompt
+        self.headers = {
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9,en-GB;q=0.8,en-IN;q=0.7",
+            "cache-control": "no-cache",
+            "content-type": "application/json",
+            "pragma": "no-cache",
+            "priority": "u=1, i",
+            "sec-ch-ua": '"Not A(Brand";v="8", "Chromium";v="132", "Microsoft Edge";v="132"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-site",
+        }
+        self.userid = ''.join(secrets.choice(string.ascii_letters + string.digits) for _ in range(21))
+        self.headers["userid"] = self.userid
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def _post_with_failover(self, payload, stream, timeout):
+        """Try all endpoints until one succeeds, else raise the last error."""
+        endpoints = self.api_endpoints.copy()
+        random.shuffle(endpoints)
+        last_exception = None
+        for endpoint in endpoints:
+            try:
+                response = self.session.post(endpoint, json=payload, stream=stream, timeout=timeout)
+                if not response.ok:
+                    last_exception = exceptions.FailedToGenerateResponseError(
+                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                    )
+                    continue
+                return response
+            except Exception as e:
+                last_exception = e
+                continue
+        if last_exception:
+            raise last_exception
+        raise exceptions.FailedToGenerateResponseError("All API endpoints failed.")
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
+        """Chat with AI (DeepInfra-style streaming and non-streaming)."""
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "model": self.model,
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt},
+            ],
+            "stream": stream,
+        }
+
+        def for_stream():
+            streaming_text = ""
+            try:
+                response = self._post_with_failover(payload, stream=True, timeout=self.timeout)
+                response.raise_for_status()
+                # Use sanitize_stream for robust OpenAI-style streaming
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None),
+                    intro_value="data:",
+                    to_json=True,
+                    skip_markers=["[DONE]"],
+                    content_extractor=lambda chunk: chunk.get("choices", [{}])[0].get("delta", {}).get("content") if isinstance(chunk, dict) else None,
+                    yield_raw_on_error=False,
+                )
+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        resp = dict(text=content_chunk)
+                        yield resp if not raw else content_chunk
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Streaming request failed: {e}") from e
+            finally:
+                if streaming_text:
+                    self.last_response = {"text": streaming_text}
+                    self.conversation.update_chat_history(prompt, streaming_text)
+
+        def for_non_stream():
+            try:
+                response = self._post_with_failover(payload, stream=False, timeout=self.timeout)
+                response.raise_for_status()
+                response_text = response.text
+                processed_stream = sanitize_stream(
+                    data=response_text,
+                    to_json=True,
+                    intro_value=None,
+                    content_extractor=lambda chunk: chunk.get("choices", [{}])[0].get("message", {}).get("content") if isinstance(chunk, dict) else None,
+                    yield_raw_on_error=False,
+                )
+                content = next(processed_stream, None)
+                content = content if isinstance(content, str) else ""
+                self.last_response = {"text": content}
+                self.conversation.update_chat_history(prompt, content)
+                return self.last_response if not raw else content
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Non-streaming request failed: {e}") from e
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        """Generate a response `str`.
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message content from a response, handling both streaming and non-streaming formats."""
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        # Streaming chunk: choices[0]["delta"]["content"]
+        if "choices" in response and response["choices"]:
+            choice = response["choices"][0]
+            if "delta" in choice and "content" in choice["delta"]:
+                return choice["delta"]["content"]
+            if "message" in choice and "content" in choice["message"]:
+                return choice["message"]["content"]
+        # Fallback for non-standard or legacy responses
+        if "text" in response:
+            return response["text"]
+        return ""
+
+    def fetch_available_models(self):
+        """Fetches available models from the /v1/models endpoint of each API endpoint and prints the models found per endpoint."""
+        endpoints = self.api_endpoints.copy()
+        random.shuffle(endpoints)
+        results = {}
+        errors = []
+        for endpoint in endpoints:
+            models_url = endpoint.replace('/v1/chat/completions', '/v1/models')
+            try:
+                response = self.session.get(models_url, timeout=self.timeout)
+                if response.ok:
+                    data = response.json()
+                    if isinstance(data, dict) and "data" in data:
+                        models = [m["id"] if isinstance(m, dict) and "id" in m else m for m in data["data"]]
+                    elif isinstance(data, list):
+                        models = data
+                    else:
+                        models = list(data.keys()) if isinstance(data, dict) else []
+                    results[models_url] = models
+                else:
+                    errors.append(f"Failed to fetch models from {models_url}: {response.status_code} {response.text}")
+            except Exception as e:
+                errors.append(f"Error fetching from {models_url}: {e}")
+        if results:
+            for url, models in results.items():
+                print(f"Models from {url}:")
+                if models:
+                    for m in sorted(models):
+                        print(f"  {m}")
+                else:
+                    print("  No models found.")
+            return results
+        else:
+            print("No models found from any endpoint.")
+            for err in errors:
+                print(err)
+            return {}
+
+
+if __name__ == "__main__":
+    from rich import print
+    chatbot = oivscode()
+    print(chatbot.fetch_available_models())
+    response = chatbot.chat(input(">>> "), stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
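
The core of the new provider is the endpoint failover in `_post_with_failover`: shuffle the mirror list, return the first successful response, and only raise the last error once every mirror has failed. A minimal standalone sketch of that pattern, with the error type simplified and the payload left to the caller (the URLs are two of the four listed in the diff):

    import random
    import requests

    ENDPOINTS = [
        "https://oi-vscode-server.onrender.com/v1/chat/completions",
        "https://oi-vscode-server-2.onrender.com/v1/chat/completions",
    ]

    def post_with_failover(payload: dict, timeout: int = 30) -> requests.Response:
        endpoints = ENDPOINTS.copy()
        random.shuffle(endpoints)  # spread load across the mirrors
        last_error = None
        for url in endpoints:
            try:
                resp = requests.post(url, json=payload, timeout=timeout)
                if resp.ok:
                    return resp  # first healthy endpoint wins
                last_error = RuntimeError(f"{url} -> HTTP {resp.status_code}")
            except requests.RequestException as e:
                last_error = e  # network failure: try the next mirror
        raise last_error or RuntimeError("All API endpoints failed.")

Because the order is reshuffled on every call, no single mirror becomes a hot spot, and a transient outage of one host is invisible to callers as long as any mirror answers.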
webscout/Provider/samurai.py
@@ -22,6 +22,7 @@ class samurai(Provider):
         "gpt-4o",
         "o3-mini",
         "Claude-sonnet-3.7",
+        "uncensored-r1",
         "anthropic/claude-3.5-sonnet",
         "gemini-1.5-pro",
         "gemini-1.5-pro-latest",
@@ -56,9 +57,9 @@ class samurai(Provider):
         system_prompt: str = "You are a helpful assistant."
     ):
         """Initializes the Custom API client."""
-        self.url = "https://vmxvcq-5000.csb.app/v1/chat/completions"
+        self.url = "https://newapi-9qln.onrender.com/v1/chat/completions"
         self.headers = {
-            "Authorization": "Bearer Public-Samurai-001",
+            "Authorization": "Bearer Samurai-AP1-Fr33",
             "Content-Type": "application/json"
         }
         self.session = Session()
webscout/Provider/scnet.py
@@ -18,6 +18,7 @@ class SCNet(Provider):
         {"modelId": 5, "name": "Deepseek-r1-70B"},
         {"modelId": 7, "name": "QWQ-32B"},
         {"modelId": 8, "name": "minimax-text-01-456B"},
+        {"modelId": 9, "name": "Qwen3-30B-A3B"},  # Added new model
         # Add more models here as needed
     ]
     MODEL_NAME_TO_ID = {m["name"]: m["modelId"] for m in AVAILABLE_MODELS}
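
Because `MODEL_NAME_TO_ID` is rebuilt from `AVAILABLE_MODELS` by the comprehension above, the new entry is addressable by name with no further changes; a tiny sketch, with the list trimmed to the entries shown in this hunk:

    AVAILABLE_MODELS = [
        {"modelId": 8, "name": "minimax-text-01-456B"},
        {"modelId": 9, "name": "Qwen3-30B-A3B"},
    ]
    MODEL_NAME_TO_ID = {m["name"]: m["modelId"] for m in AVAILABLE_MODELS}
    assert MODEL_NAME_TO_ID["Qwen3-30B-A3B"] == 9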
webscout/Provider/typegpt.py
@@ -16,12 +16,12 @@ class TypeGPT(Provider):
     """
     AVAILABLE_MODELS = [
         # Working Models (based on testing)
-        "gpt-4o-mini-2024-07-18",
+        # "gpt-4o-mini-2024-07-18",
         "chatgpt-4o-latest",
         "deepseek-r1",
         "deepseek-v3",
         "uncensored-r1",
-        "Image-Generator",
+        # "Image-Generator",
     ]
 
     def __init__(
@@ -35,7 +35,7 @@ class TypeGPT(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "gpt-4o-mini-2024-07-18",
+        model: str = "chatgpt-4o-latest",
         system_prompt: str = "You are a helpful assistant.",
         temperature: float = 0.5,
         presence_penalty: int = 0,