webscout 8.3.6__py3-none-any.whl → 8.3.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of webscout might be problematic.

Files changed (130)
  1. webscout/AIutel.py +2 -0
  2. webscout/Provider/AISEARCH/__init__.py +18 -11
  3. webscout/Provider/AISEARCH/scira_search.py +3 -1
  4. webscout/Provider/Aitopia.py +2 -3
  5. webscout/Provider/Andi.py +3 -3
  6. webscout/Provider/ChatGPTClone.py +1 -1
  7. webscout/Provider/ChatSandbox.py +1 -0
  8. webscout/Provider/Cloudflare.py +1 -1
  9. webscout/Provider/Cohere.py +1 -0
  10. webscout/Provider/Deepinfra.py +7 -10
  11. webscout/Provider/ExaAI.py +1 -1
  12. webscout/Provider/ExaChat.py +1 -80
  13. webscout/Provider/Flowith.py +1 -1
  14. webscout/Provider/Gemini.py +7 -5
  15. webscout/Provider/GeminiProxy.py +1 -0
  16. webscout/Provider/GithubChat.py +3 -1
  17. webscout/Provider/Groq.py +1 -1
  18. webscout/Provider/HeckAI.py +8 -4
  19. webscout/Provider/Jadve.py +23 -38
  20. webscout/Provider/K2Think.py +308 -0
  21. webscout/Provider/Koboldai.py +8 -186
  22. webscout/Provider/LambdaChat.py +2 -4
  23. webscout/Provider/Nemotron.py +3 -4
  24. webscout/Provider/Netwrck.py +3 -2
  25. webscout/Provider/OLLAMA.py +1 -0
  26. webscout/Provider/OPENAI/Cloudflare.py +6 -7
  27. webscout/Provider/OPENAI/FalconH1.py +2 -7
  28. webscout/Provider/OPENAI/FreeGemini.py +6 -8
  29. webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
  30. webscout/Provider/OPENAI/NEMOTRON.py +3 -6
  31. webscout/Provider/OPENAI/PI.py +5 -4
  32. webscout/Provider/OPENAI/Qwen3.py +2 -3
  33. webscout/Provider/OPENAI/TogetherAI.py +2 -2
  34. webscout/Provider/OPENAI/TwoAI.py +3 -4
  35. webscout/Provider/OPENAI/__init__.py +17 -58
  36. webscout/Provider/OPENAI/ai4chat.py +313 -303
  37. webscout/Provider/OPENAI/base.py +9 -29
  38. webscout/Provider/OPENAI/chatgpt.py +7 -2
  39. webscout/Provider/OPENAI/chatgptclone.py +4 -7
  40. webscout/Provider/OPENAI/chatsandbox.py +84 -59
  41. webscout/Provider/OPENAI/deepinfra.py +6 -6
  42. webscout/Provider/OPENAI/heckai.py +4 -1
  43. webscout/Provider/OPENAI/netwrck.py +1 -0
  44. webscout/Provider/OPENAI/scirachat.py +6 -0
  45. webscout/Provider/OPENAI/textpollinations.py +3 -11
  46. webscout/Provider/OPENAI/toolbaz.py +14 -11
  47. webscout/Provider/OpenGPT.py +1 -1
  48. webscout/Provider/Openai.py +150 -402
  49. webscout/Provider/PI.py +1 -0
  50. webscout/Provider/Perplexitylabs.py +1 -2
  51. webscout/Provider/QwenLM.py +107 -89
  52. webscout/Provider/STT/__init__.py +17 -2
  53. webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
  54. webscout/Provider/StandardInput.py +1 -1
  55. webscout/Provider/TTI/__init__.py +18 -12
  56. webscout/Provider/TTS/__init__.py +18 -10
  57. webscout/Provider/TeachAnything.py +1 -0
  58. webscout/Provider/TextPollinationsAI.py +5 -12
  59. webscout/Provider/TogetherAI.py +86 -87
  60. webscout/Provider/TwoAI.py +53 -309
  61. webscout/Provider/TypliAI.py +2 -1
  62. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
  63. webscout/Provider/Venice.py +2 -1
  64. webscout/Provider/VercelAI.py +1 -0
  65. webscout/Provider/WiseCat.py +2 -1
  66. webscout/Provider/WrDoChat.py +2 -1
  67. webscout/Provider/__init__.py +18 -86
  68. webscout/Provider/ai4chat.py +1 -1
  69. webscout/Provider/akashgpt.py +7 -10
  70. webscout/Provider/cerebras.py +115 -9
  71. webscout/Provider/chatglm.py +170 -83
  72. webscout/Provider/cleeai.py +1 -2
  73. webscout/Provider/deepseek_assistant.py +1 -1
  74. webscout/Provider/elmo.py +1 -1
  75. webscout/Provider/geminiapi.py +1 -1
  76. webscout/Provider/granite.py +1 -1
  77. webscout/Provider/hermes.py +1 -3
  78. webscout/Provider/julius.py +1 -0
  79. webscout/Provider/learnfastai.py +1 -1
  80. webscout/Provider/llama3mitril.py +1 -1
  81. webscout/Provider/llmchat.py +1 -1
  82. webscout/Provider/llmchatco.py +1 -1
  83. webscout/Provider/meta.py +3 -3
  84. webscout/Provider/oivscode.py +2 -2
  85. webscout/Provider/scira_chat.py +51 -124
  86. webscout/Provider/searchchat.py +1 -0
  87. webscout/Provider/sonus.py +1 -1
  88. webscout/Provider/toolbaz.py +15 -12
  89. webscout/Provider/turboseek.py +31 -22
  90. webscout/Provider/typefully.py +2 -1
  91. webscout/Provider/x0gpt.py +1 -0
  92. webscout/Provider/yep.py +2 -1
  93. webscout/tempid.py +6 -0
  94. webscout/version.py +1 -1
  95. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/METADATA +2 -1
  96. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/RECORD +103 -129
  97. webscout/Provider/AllenAI.py +0 -440
  98. webscout/Provider/Blackboxai.py +0 -793
  99. webscout/Provider/FreeGemini.py +0 -250
  100. webscout/Provider/GptOss.py +0 -207
  101. webscout/Provider/Hunyuan.py +0 -283
  102. webscout/Provider/Kimi.py +0 -445
  103. webscout/Provider/MCPCore.py +0 -322
  104. webscout/Provider/MiniMax.py +0 -207
  105. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
  106. webscout/Provider/OPENAI/MiniMax.py +0 -298
  107. webscout/Provider/OPENAI/autoproxy.py +0 -1067
  108. webscout/Provider/OPENAI/copilot.py +0 -321
  109. webscout/Provider/OPENAI/gptoss.py +0 -288
  110. webscout/Provider/OPENAI/kimi.py +0 -469
  111. webscout/Provider/OPENAI/mcpcore.py +0 -431
  112. webscout/Provider/OPENAI/multichat.py +0 -378
  113. webscout/Provider/Reka.py +0 -214
  114. webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
  115. webscout/Provider/asksteve.py +0 -220
  116. webscout/Provider/copilot.py +0 -441
  117. webscout/Provider/freeaichat.py +0 -294
  118. webscout/Provider/koala.py +0 -182
  119. webscout/Provider/lmarena.py +0 -198
  120. webscout/Provider/monochat.py +0 -275
  121. webscout/Provider/multichat.py +0 -375
  122. webscout/Provider/scnet.py +0 -244
  123. webscout/Provider/talkai.py +0 -194
  124. /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
  125. /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
  126. /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
  127. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
  128. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
  129. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
  130. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/base.py

@@ -1,6 +1,7 @@
 from abc import ABC, abstractmethod
 from typing import List, Dict, Optional, Union, Generator, Any, TypedDict, Callable
 import json
+import requests
 from dataclasses import dataclass
 
 # Import WebScout Litlogger instead of standard logging
@@ -8,13 +9,6 @@ from webscout.Litlogger import Logger, LogLevel
 
 logger = Logger(name="OpenAIBase", level=LogLevel.INFO)
 
-# Import the LitMeta metaclass from Litproxy
-try:
-    from litproxy import LitMeta
-except ImportError:
-    from .autoproxy import ProxyAutoMeta as LitMeta
-
-
 # Import the utils for response structures
 from webscout.Provider.OPENAI.utils import ChatCompletion, ChatCompletionChunk
 
@@ -182,39 +176,25 @@ class BaseChat(ABC):
     completions: BaseCompletions
 
 
-class OpenAICompatibleProvider(ABC, metaclass=LitMeta):
+class OpenAICompatibleProvider(ABC):
     """
     Abstract Base Class for providers mimicking the OpenAI Python client structure.
     Requires a nested 'chat.completions' structure with tool support.
-    All subclasses automatically get proxy support via LitMeta.
-
-    # Available proxy helpers:
-    # - self.get_proxied_session() - returns a requests.Session with proxies
-    # - self.get_proxied_curl_session() - returns a curl_cffi.Session with proxies
-    # - self.get_proxied_curl_async_session() - returns a curl_cffi.AsyncSession with proxies
-
-    # Proxy support is automatically injected into:
-    # - requests.Session objects
-    # - httpx.Client objects
-    # - curl_cffi.requests.Session objects
-    # - curl_cffi.requests.AsyncSession objects
-    #
-    # Inbuilt auto-retry is also enabled for all requests.Session and curl_cffi.Session objects.
+    Users can provide their own proxies via the proxies parameter.
     """
     chat: BaseChat
     available_tools: Dict[str, Tool] = {} # Dictionary of available tools
    supports_tools: bool = False # Whether the provider supports tools
    supports_tool_choice: bool = False # Whether the provider supports tool_choice
 
-    @abstractmethod
-    def __init__(self, api_key: Optional[str] = None, tools: Optional[List[Tool]] = None, proxies: Optional[dict] = None, disable_auto_proxy: bool = False, **kwargs: Any):
+    def __init__(self, api_key: Optional[str] = None, tools: Optional[List[Tool]] = None, proxies: Optional[dict] = None, **kwargs: Any):
         self.available_tools = {}
         if tools:
             self.register_tools(tools)
-        # self.proxies is set by ProxyAutoMeta
-        # Subclasses should use self.proxies for all network requests
-        # Optionally, use self.get_proxied_session() for a requests.Session with proxies
-        # The disable_auto_proxy parameter is handled by ProxyAutoMeta
+        self.proxies = proxies or {}
+        self.session = requests.Session()
+        if self.proxies:
+            self.session.proxies.update(self.proxies)
         # raise NotImplementedError # <-- Commented out for metaclass test
 
     @property
@@ -266,4 +246,4 @@ class OpenAICompatibleProvider(ABC, metaclass=LitMeta):
                 updated_messages.append(tool_message)
                 break
 
-        return updated_messages
\ No newline at end of file
+        return updated_messages
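The net effect of the base.py change: proxy handling moves from implicit metaclass injection to an explicit constructor contract, so any subclass that calls super().__init__() gets self.proxies and a pre-configured requests.Session. A minimal sketch of the new contract follows; DemoProvider and its ping() method are hypothetical, for illustration only:

    # Minimal sketch of the 8.3.7 proxy contract; DemoProvider is hypothetical.
    from typing import Optional
    from webscout.Provider.OPENAI.base import OpenAICompatibleProvider

    class DemoProvider(OpenAICompatibleProvider):
        def __init__(self, proxies: Optional[dict] = None, **kwargs):
            # super().__init__ now stores proxies and applies them to self.session.
            super().__init__(proxies=proxies, **kwargs)

        def ping(self) -> int:
            # Requests made through self.session are routed via the user-supplied proxies.
            return self.session.get("https://httpbin.org/ip", timeout=10).status_code

    client = DemoProvider(proxies={"http": "http://proxy:8080", "https": "http://proxy:8080"})  # placeholder proxy URL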
webscout/Provider/OPENAI/chatgpt.py

@@ -558,11 +558,16 @@ class ChatGPT(OpenAICompatibleProvider):
     """
 
     def __init__(
-        self
+        self,
+        proxies: Optional[Dict[str, str]] = None
     ):
         """
         Initialize the ChatGPT client.
+
+        Args:
+            proxies: Optional proxy configuration dict, e.g. {"http": "http://proxy:8080", "https": "https://proxy:8080"}
         """
+        super().__init__(proxies=proxies)
         # Initialize chat interface
         self.chat = Chat(self)
 
@@ -585,4 +590,4 @@ if __name__ == "__main__":
         messages=[{"role": "user", "content": "How manr r in strawberry"}]
     )
     print(response.choices[0].message.content)
-    print()
\ No newline at end of file
+    print()
webscout/Provider/OPENAI/chatgptclone.py

@@ -373,7 +373,8 @@ class ChatGPTClone(OpenAICompatibleProvider):
     def __init__(
         self,
         browser: str = "chrome",
-        impersonate: str = "chrome120"
+        impersonate: str = "chrome120",
+        proxies: Optional[Dict[str, str]] = None
     ):
         """
         Initialize the ChatGPTClone client.
@@ -381,15 +382,13 @@ class ChatGPTClone(OpenAICompatibleProvider):
         Args:
             browser: Browser to emulate in user agent (for LitAgent fallback)
             impersonate: Browser impersonation for curl_cffi (default: chrome120)
+            proxies: Optional proxy configuration dict, e.g. {"http": "http://proxy:8080", "https": "https://proxy:8080"}
         """
+        super().__init__(proxies=proxies)
         self.timeout = 30
         self.temperature = 0.6 # Default temperature
         self.top_p = 0.7 # Default top_p
 
-        # Use curl_cffi for Cloudflare bypass and browser impersonation
-        self.session = Session(impersonate=impersonate)
-        self.session.proxies = {}
-
         # Use LitAgent for fingerprint if available, else fallback
         agent = LitAgent()
         self.fingerprint = agent.generate_fingerprint(browser)
@@ -520,5 +519,3 @@ if __name__ == "__main__":
     )
     print(response.choices[0].message.content)
     print()
-    print("Proxies on instance:", client.proxies)
-    print("Proxies on session:", client.session.proxies)
webscout/Provider/OPENAI/chatsandbox.py

@@ -2,8 +2,8 @@ from typing import List, Dict, Optional, Union, Generator, Any
 import time
 import json
 from webscout.litagent import LitAgent
-from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
-from .utils import (
+from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from webscout.Provider.OPENAI.utils import (
     ChatCompletion,
     ChatCompletionChunk,
     Choice,
@@ -13,7 +13,10 @@ from .utils import (
     format_prompt,
     count_tokens
 )
-import requests
+from curl_cffi.requests import Session
+from curl_cffi.const import CurlHttpVersion
+from webscout.AIutel import sanitize_stream
+from webscout import exceptions
 
 # ANSI escape codes for formatting
 BOLD = "\033[1m"
@@ -24,6 +27,19 @@ class Completions(BaseCompletions):
     def __init__(self, client: 'ChatSandbox'):
         self._client = client
 
+    @staticmethod
+    def _chatsandbox_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from the chatsandbox stream format."""
+        if isinstance(chunk, str):
+            try:
+                data = json.loads(chunk)
+                if isinstance(data, dict) and "reasoning_content" in data:
+                    return data["reasoning_content"]
+                return chunk
+            except json.JSONDecodeError:
+                return chunk
+        return None
+
     def create(
         self,
         *,
@@ -43,7 +59,7 @@ class Completions(BaseCompletions):
         # Use model name conversion for compatibility
         model = self._client.convert_model_name(model)
         # Compose the conversation prompt using format_prompt
-        question = format_prompt(messages, add_special_tokens=True)
+        question = format_prompt(messages, add_special_tokens=False, do_continue=True)
         payload = {
             "messages": [question],
             "character": model
@@ -70,7 +86,7 @@ class Completions(BaseCompletions):
             'dnt': '1',
             'sec-gpc': '1',
         }
-        session = requests.Session()
+        session = Session()
         session.headers.update(headers)
         session.proxies = proxies if proxies is not None else {}
 
@@ -80,29 +96,37 @@ class Completions(BaseCompletions):
                     url,
                     json=payload,
                     stream=True,
-                    timeout=timeout if timeout is not None else 30
+                    timeout=timeout if timeout is not None else 30,
+                    impersonate="chrome120",
+                    http_version=CurlHttpVersion.V1_1
                 )
-                response.raise_for_status()
-                streaming_text = ""
-                for chunk in response.iter_content(chunk_size=None):
-                    if not chunk:
-                        continue
-                    text = chunk.decode('utf-8', errors='replace')
-                    try:
-                        data = json.loads(text)
-                        content = data.get("reasoning_content", text)
-                    except Exception:
-                        content = text
-                    streaming_text += content
-                    delta = ChoiceDelta(content=content)
-                    choice = Choice(index=0, delta=delta, finish_reason=None)
-                    chunk_obj = ChatCompletionChunk(
-                        id=request_id,
-                        choices=[choice],
-                        created=created_time,
-                        model=model,
+                if not response.ok:
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                     )
-                    yield chunk_obj
+
+                streaming_text = ""
+                # Use sanitize_stream with the custom extractor
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value=None, # No simple prefix to remove here
+                    to_json=False, # Content is not JSON
+                    content_extractor=self._chatsandbox_extractor # Use the specific extractor
+                )
+
+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        delta = ChoiceDelta(content=content_chunk)
+                        choice = Choice(index=0, delta=delta, finish_reason=None)
+                        chunk_obj = ChatCompletionChunk(
+                            id=request_id,
+                            choices=[choice],
+                            created=created_time,
+                            model=model,
+                        )
+                        yield chunk_obj
+
                 # Final chunk
                 delta = ChoiceDelta(content=None)
                 choice = Choice(index=0, delta=delta, finish_reason="stop")
@@ -116,39 +140,28 @@ class Completions(BaseCompletions):
             except Exception as e:
                 raise RuntimeError(f"ChatSandbox streaming request failed: {e}")
         def for_non_stream():
-            try:
-                response = session.post(
-                    url,
-                    json=payload,
-                    timeout=timeout if timeout is not None else 30
-                )
-                response.raise_for_status()
-                text = response.text
-                try:
-                    data = json.loads(text)
-                    content = data.get("reasoning_content", text)
-                except Exception:
-                    content = text
-                prompt_tokens = count_tokens(question)
-                completion_tokens = count_tokens(content)
-                total_tokens = prompt_tokens + completion_tokens
-                usage = CompletionUsage(
-                    prompt_tokens=prompt_tokens,
-                    completion_tokens=completion_tokens,
-                    total_tokens=total_tokens
-                )
-                message = ChatCompletionMessage(role="assistant", content=content)
-                choice = Choice(index=0, message=message, finish_reason="stop")
-                completion = ChatCompletion(
-                    id=request_id,
-                    choices=[choice],
-                    created=created_time,
-                    model=model,
-                    usage=usage,
-                )
-                return completion
-            except Exception as e:
-                raise RuntimeError(f"ChatSandbox request failed: {e}")
+            streaming_text = ""
+            for chunk_obj in for_stream():
+                if chunk_obj.choices[0].delta.content:
+                    streaming_text += chunk_obj.choices[0].delta.content
+            prompt_tokens = count_tokens(question)
+            completion_tokens = count_tokens(streaming_text)
+            total_tokens = prompt_tokens + completion_tokens
+            usage = CompletionUsage(
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                total_tokens=total_tokens
+            )
+            message = ChatCompletionMessage(role="assistant", content=streaming_text)
+            choice = Choice(index=0, message=message, finish_reason="stop")
+            completion = ChatCompletion(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                usage=usage,
+            )
+            return completion
         return for_stream() if stream else for_non_stream()
 
 class Chat(BaseChat):
@@ -175,3 +188,15 @@ class ChatSandbox(OpenAICompatibleProvider):
         # Default to openai if no match
         print(f"{RED}{BOLD}Warning: Model '{model}' not found, using default model 'openai'{RESET}")
         return "openai"
+
+if __name__ == "__main__":
+    client = ChatSandbox()
+    response = client.chat.completions.create(
+        model="openai",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": "Explain the theory of relativity in simple terms."}
+        ],
+        stream=False
+    )
+    print(response.choices[0].message.content)
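The new _chatsandbox_extractor is a pure function, so its behavior is easy to pin down in isolation: JSON chunks carrying reasoning_content are unwrapped, any other string passes through unchanged, and non-strings are dropped. A standalone check mirroring the logic shown above:

    import json

    def extract(chunk):
        # Same logic as Completions._chatsandbox_extractor above.
        if isinstance(chunk, str):
            try:
                data = json.loads(chunk)
                if isinstance(data, dict) and "reasoning_content" in data:
                    return data["reasoning_content"]
                return chunk
            except json.JSONDecodeError:
                return chunk
        return None

    assert extract('{"reasoning_content": "thinking..."}') == "thinking..."
    assert extract("plain text") == "plain text"
    assert extract('{"other": 1}') == '{"other": 1}'  # valid JSON without the key passes through
    assert extract(b"bytes") is None                  # non-strings are filtered out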
webscout/Provider/OPENAI/deepinfra.py

@@ -195,9 +195,10 @@ class Chat(BaseChat):
 
 class DeepInfra(OpenAICompatibleProvider):
     AVAILABLE_MODELS = [
-        "anthropic/claude-4-opus",
         "moonshotai/Kimi-K2-Instruct",
-        "anthropic/claude-4-sonnet",
+        "Qwen/Qwen3-Next-80B-A3B-Instruct",
+        "Qwen/Qwen3-Next-80B-A3B-Thinking",
+        "moonshotai/Kimi-K2-Instruct-0905",
         "deepseek-ai/DeepSeek-R1-0528-Turbo",
         "Qwen/Qwen3-235B-A22B-Thinking-2507",
         "Qwen/Qwen3-Coder-480B-A35B-Instruct",
@@ -217,14 +218,13 @@ class DeepInfra(OpenAICompatibleProvider):
         "mistralai/Mistral-Small-3.1-24B-Instruct-2503",
         "microsoft/phi-4-reasoning-plus",
         "Qwen/QwQ-32B",
-        "google/gemini-2.5-flash",
-        "google/gemini-2.5-pro",
         "google/gemma-3-27b-it",
         "google/gemma-3-12b-it",
         "google/gemma-3-4b-it",
         "microsoft/Phi-4-multimodal-instruct",
         "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
         "deepseek-ai/DeepSeek-V3",
+        "deepseek-ai/DeepSeek-V3.1",
         "meta-llama/Llama-3.3-70B-Instruct-Turbo",
         "meta-llama/Llama-3.3-70B-Instruct",
         "microsoft/phi-4",
@@ -238,7 +238,6 @@ class DeepInfra(OpenAICompatibleProvider):
         "Sao10K/L3-8B-Lunaris-v1-Turbo",
         "Sao10K/L3.1-70B-Euryale-v2.2",
         "Sao10K/L3.3-70B-Euryale-v2.3",
-        "anthropic/claude-3-7-sonnet-latest",
         "deepseek-ai/DeepSeek-R1",
         "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
         "deepseek-ai/DeepSeek-R1-Turbo",
@@ -268,6 +267,7 @@ class DeepInfra(OpenAICompatibleProvider):
         "openai/gpt-oss-20b",
         "allenai/olmOCR-7B-0725-FP8",
     ]
+
     def __init__(self, browser: str = "chrome", api_key: str = None):
         self.timeout = None
         self.base_url = "https://api.deepinfra.com/v1/openai/chat/completions"
@@ -312,4 +312,4 @@ if __name__ == "__main__":
         max_tokens=10000,
         stream=False
     )
-    print(response)
+    print(response.choices[0].message.content)
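The DeepInfra catalog drops every Anthropic and Gemini route while adding Qwen3-Next, Kimi-K2-Instruct-0905, and DeepSeek-V3.1, so code pinned to the removed names will now fail model validation. Selecting one of the new models follows the pattern of the __main__ block above (prompt and token limit are arbitrary):

    client = DeepInfra()
    response = client.chat.completions.create(
        model="Qwen/Qwen3-Next-80B-A3B-Instruct",  # newly added in 8.3.7
        messages=[{"role": "user", "content": "Say hello in one word."}],
        max_tokens=100,
        stream=False,
    )
    print(response.choices[0].message.content)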
webscout/Provider/OPENAI/heckai.py

@@ -220,7 +220,10 @@ class HeckAI(OpenAICompatibleProvider):
         "openai/gpt-4o-mini",
         "openai/gpt-4.1-mini",
         "x-ai/grok-3-mini-beta",
-        "meta-llama/llama-4-scout"
+        "meta-llama/llama-4-scout",
+        "openai/gpt-5-mini",
+        "openai/gpt-5-nano"
+
     ]
 
     def __init__(
webscout/Provider/OPENAI/netwrck.py

@@ -204,6 +204,7 @@ class Netwrck(OpenAICompatibleProvider):
 
     AVAILABLE_MODELS = [
         "thedrummer/valkyrie-49b-v1",
+        "thedrummer/skyfall-36b-v2",
         "sao10k/l3-euryale-70b",
         "deepseek/deepseek-chat",
         "deepseek/deepseek-r1",
webscout/Provider/OPENAI/scirachat.py

@@ -336,6 +336,9 @@ class SciraChat(OpenAICompatibleProvider):
         "o3": "scira-o3",
         "qwen/qwen3-32b": "scira-qwen-32b",
         "qwen3-30b-a3b": "scira-qwen-30b",
+        "qwen3-4b": "scira-qwen-4b",
+        "qwen3-32b": "scira-qwen-32b",
+        "qwen3-4b-thinking": "scira-qwen-4b-thinking",
         "deepseek-v3-0324": "scira-deepseek-v3",
         "claude-3-5-haiku-20241022": "scira-haiku",
         "mistral-small-latest": "scira-mistral",
@@ -346,6 +349,7 @@ class SciraChat(OpenAICompatibleProvider):
         "claude-sonnet-4-20250514-thinking": "scira-anthropic-thinking",
         "claude-4-opus-20250514": "scira-opus",
         "claude-4-opus-20250514-pro": "scira-opus-pro",
+        "llama-4-maverick": "scira-llama-4",
         "meta-llama/llama-4-maverick-17b-128e-instruct": "scira-llama-4",
         "kimi-k2-instruct": "scira-kimi-k2",
         "scira-kimi-k2": "kimi-k2-instruct",
@@ -360,6 +364,8 @@ class SciraChat(OpenAICompatibleProvider):
     SCIRA_TO_MODEL["scira-nano"] = "gpt-4.1-nano"
     SCIRA_TO_MODEL["scira-qwen-32b"] = "qwen/qwen3-32b"
     SCIRA_TO_MODEL["scira-qwen-30b"] = "qwen3-30b-a3b"
+    SCIRA_TO_MODEL["scira-qwen-4b"] = "qwen3-4b"
+    SCIRA_TO_MODEL["scira-qwen-4b-thinking"] = "qwen3-4b-thinking"
     SCIRA_TO_MODEL["scira-deepseek-v3"] = "deepseek-v3-0324"
     SCIRA_TO_MODEL["scira-grok-4"] = "grok-4"
     SCIRA_TO_MODEL["scira-kimi-k2"] = "kimi-k2-instruct"
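The alias table is deliberately bidirectional: user-facing names map to Scira's internal scira-* ids, and SCIRA_TO_MODEL maps back, so a lookup can accept either form. A standalone sketch of that round trip using the entries added here (the forward dict's identifier is not visible in this hunk, so MODEL_TO_SCIRA and the resolve helper are stand-ins):

    # Stand-in names; dict contents copied from the diff above.
    MODEL_TO_SCIRA = {
        "qwen3-4b": "scira-qwen-4b",
        "qwen3-4b-thinking": "scira-qwen-4b-thinking",
    }
    SCIRA_TO_MODEL = {v: k for k, v in MODEL_TO_SCIRA.items()}

    def resolve(name: str) -> str:
        # Hypothetical helper: accept either alias form, normalize to the scira-* id.
        if name in MODEL_TO_SCIRA:
            return MODEL_TO_SCIRA[name]
        if name in SCIRA_TO_MODEL:
            return name
        raise ValueError(f"unknown model: {name}")

    assert resolve("qwen3-4b") == "scira-qwen-4b"
    assert resolve("scira-qwen-4b-thinking") == "scira-qwen-4b-thinking"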
webscout/Provider/OPENAI/textpollinations.py

@@ -277,28 +277,20 @@ class TextPollinations(OpenAICompatibleProvider):
 
     AVAILABLE_MODELS = [
         "deepseek-reasoning",
-        "glm",
-        "gpt-5-nano",
-        "llama-fast-roblox",
-        "llama-roblox",
-        "llamascout",
+        "gemini",
         "mistral",
-        "mistral-nemo-roblox",
-        "mistral-roblox",
         "nova-fast",
         "openai",
         "openai-audio",
         "openai-fast",
-        "openai-large",
-        "openai-roblox",
+        "openai-reasoning",
         "qwen-coder",
+        "roblox-rp",
         "bidara",
         "evil",
-        "hypnosis-tracy",
         "midijourney",
         "mirexa",
         "rtist",
-        "sur",
         "unity",
     ]
 
webscout/Provider/OPENAI/toolbaz.py

@@ -291,27 +291,30 @@ class Toolbaz(OpenAICompatibleProvider):
 
     AVAILABLE_MODELS = [
         "gemini-2.5-flash",
+        "gemini-2.5-pro",
         "gemini-2.0-flash-thinking",
-        "sonar",
         "gemini-2.0-flash",
-        "gemini-1.5-flash",
+
+        "claude-sonnet-4",
+
+        "gpt-5",
+        "gpt-oss-120b",
         "o3-mini",
         "gpt-4o-latest",
-        "gpt-4o",
+
+        "toolbaz_v4",
+        "toolbaz_v3.5_pro",
+
         "deepseek-r1",
+        "deepseek-v3.1",
+        "deepseek-v3",
+
         "Llama-4-Maverick",
-        "Llama-4-Scout",
         "Llama-3.3-70B",
-        "gpt-oss-120b",
-        "Qwen2.5-72B",
-        "grok-2-1212",
-        "grok-3-beta",
-        "toolbaz_v3.5_pro",
-        "toolbaz_v3",
+
         "mixtral_8x22b",
         "L3-70B-Euryale-v2.1",
         "midnight-rose",
-        "unity",
         "unfiltered_x"
     ]
 
webscout/Provider/OpenGPT.py

@@ -15,7 +15,7 @@ class OpenGPT(Provider):
     """
     A class to interact with the Open-GPT API.
     """
-
+    required_auth = False
     def __init__(
         self,
         is_conversation: bool = True,
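required_auth = False is a class-level flag rather than an instance attribute, so callers can inspect it without constructing the provider. A hypothetical filter over provider classes (the providers list is illustrative, not an API confirmed by this diff):

    # Hypothetical: select provider classes usable without an API key.
    providers = [OpenGPT]  # illustrative; imagine the full provider registry here
    keyless = [p for p in providers if getattr(p, "required_auth", True) is False]
    print([p.__name__ for p in keyless])  # ['OpenGPT']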