webscout 8.3.5__py3-none-any.whl → 8.3.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- webscout/AIutel.py +2 -0
- webscout/Bard.py +12 -6
- webscout/DWEBS.py +66 -57
- webscout/Provider/{UNFINISHED → AISEARCH}/PERPLEXED_search.py +34 -74
- webscout/Provider/AISEARCH/__init__.py +18 -11
- webscout/Provider/AISEARCH/scira_search.py +3 -1
- webscout/Provider/Aitopia.py +2 -3
- webscout/Provider/Andi.py +3 -3
- webscout/Provider/ChatGPTClone.py +1 -1
- webscout/Provider/ChatSandbox.py +1 -0
- webscout/Provider/Cloudflare.py +1 -1
- webscout/Provider/Cohere.py +1 -0
- webscout/Provider/Deepinfra.py +13 -10
- webscout/Provider/ExaAI.py +1 -1
- webscout/Provider/ExaChat.py +1 -80
- webscout/Provider/Flowith.py +6 -1
- webscout/Provider/Gemini.py +7 -5
- webscout/Provider/GeminiProxy.py +1 -0
- webscout/Provider/GithubChat.py +4 -1
- webscout/Provider/Groq.py +1 -1
- webscout/Provider/HeckAI.py +8 -4
- webscout/Provider/Jadve.py +23 -38
- webscout/Provider/K2Think.py +308 -0
- webscout/Provider/Koboldai.py +8 -186
- webscout/Provider/LambdaChat.py +2 -4
- webscout/Provider/Nemotron.py +3 -4
- webscout/Provider/Netwrck.py +6 -8
- webscout/Provider/OLLAMA.py +1 -0
- webscout/Provider/OPENAI/Cloudflare.py +6 -7
- webscout/Provider/OPENAI/FalconH1.py +2 -7
- webscout/Provider/OPENAI/FreeGemini.py +6 -8
- webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
- webscout/Provider/OPENAI/NEMOTRON.py +3 -6
- webscout/Provider/OPENAI/PI.py +5 -4
- webscout/Provider/OPENAI/Qwen3.py +2 -3
- webscout/Provider/OPENAI/README.md +2 -1
- webscout/Provider/OPENAI/TogetherAI.py +52 -57
- webscout/Provider/OPENAI/TwoAI.py +3 -4
- webscout/Provider/OPENAI/__init__.py +17 -56
- webscout/Provider/OPENAI/ai4chat.py +313 -303
- webscout/Provider/OPENAI/base.py +9 -29
- webscout/Provider/OPENAI/chatgpt.py +7 -2
- webscout/Provider/OPENAI/chatgptclone.py +4 -7
- webscout/Provider/OPENAI/chatsandbox.py +84 -59
- webscout/Provider/OPENAI/deepinfra.py +12 -6
- webscout/Provider/OPENAI/e2b.py +60 -8
- webscout/Provider/OPENAI/flowith.py +4 -3
- webscout/Provider/OPENAI/generate_api_key.py +48 -0
- webscout/Provider/OPENAI/heckai.py +4 -1
- webscout/Provider/OPENAI/netwrck.py +9 -12
- webscout/Provider/OPENAI/refact.py +274 -0
- webscout/Provider/OPENAI/scirachat.py +6 -0
- webscout/Provider/OPENAI/textpollinations.py +3 -14
- webscout/Provider/OPENAI/toolbaz.py +14 -10
- webscout/Provider/OpenGPT.py +1 -1
- webscout/Provider/Openai.py +150 -402
- webscout/Provider/PI.py +1 -0
- webscout/Provider/Perplexitylabs.py +1 -2
- webscout/Provider/QwenLM.py +107 -89
- webscout/Provider/STT/__init__.py +17 -2
- webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
- webscout/Provider/StandardInput.py +1 -1
- webscout/Provider/TTI/__init__.py +18 -12
- webscout/Provider/TTI/bing.py +14 -2
- webscout/Provider/TTI/together.py +10 -9
- webscout/Provider/TTS/README.md +0 -1
- webscout/Provider/TTS/__init__.py +18 -11
- webscout/Provider/TTS/base.py +479 -159
- webscout/Provider/TTS/deepgram.py +409 -156
- webscout/Provider/TTS/elevenlabs.py +425 -111
- webscout/Provider/TTS/freetts.py +317 -140
- webscout/Provider/TTS/gesserit.py +192 -128
- webscout/Provider/TTS/murfai.py +248 -113
- webscout/Provider/TTS/openai_fm.py +347 -129
- webscout/Provider/TTS/speechma.py +620 -586
- webscout/Provider/TeachAnything.py +1 -0
- webscout/Provider/TextPollinationsAI.py +5 -15
- webscout/Provider/TogetherAI.py +136 -142
- webscout/Provider/TwoAI.py +53 -309
- webscout/Provider/TypliAI.py +2 -1
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
- webscout/Provider/UNFINISHED/VercelAIGateway.py +339 -0
- webscout/Provider/Venice.py +2 -1
- webscout/Provider/VercelAI.py +1 -0
- webscout/Provider/WiseCat.py +2 -1
- webscout/Provider/WrDoChat.py +2 -1
- webscout/Provider/__init__.py +18 -174
- webscout/Provider/ai4chat.py +1 -1
- webscout/Provider/akashgpt.py +7 -10
- webscout/Provider/cerebras.py +194 -38
- webscout/Provider/chatglm.py +170 -83
- webscout/Provider/cleeai.py +1 -2
- webscout/Provider/deepseek_assistant.py +1 -1
- webscout/Provider/elmo.py +1 -1
- webscout/Provider/geminiapi.py +1 -1
- webscout/Provider/granite.py +1 -1
- webscout/Provider/hermes.py +1 -3
- webscout/Provider/julius.py +1 -0
- webscout/Provider/learnfastai.py +1 -1
- webscout/Provider/llama3mitril.py +1 -1
- webscout/Provider/llmchat.py +1 -1
- webscout/Provider/llmchatco.py +1 -1
- webscout/Provider/meta.py +3 -3
- webscout/Provider/oivscode.py +2 -2
- webscout/Provider/scira_chat.py +51 -124
- webscout/Provider/searchchat.py +1 -0
- webscout/Provider/sonus.py +1 -1
- webscout/Provider/toolbaz.py +15 -11
- webscout/Provider/turboseek.py +31 -22
- webscout/Provider/typefully.py +2 -1
- webscout/Provider/x0gpt.py +1 -0
- webscout/Provider/yep.py +2 -1
- webscout/conversation.py +22 -20
- webscout/sanitize.py +14 -10
- webscout/scout/README.md +20 -23
- webscout/scout/core/crawler.py +125 -38
- webscout/scout/core/scout.py +26 -5
- webscout/tempid.py +6 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +13 -6
- webscout/webscout_search_async.py +10 -8
- webscout/yep_search.py +13 -5
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/METADATA +3 -1
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/RECORD +132 -155
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -793
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/Glider.py +0 -225
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/MCPCore.py +0 -322
- webscout/Provider/MiniMax.py +0 -207
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
- webscout/Provider/OPENAI/MiniMax.py +0 -298
- webscout/Provider/OPENAI/autoproxy.py +0 -1067
- webscout/Provider/OPENAI/c4ai.py +0 -394
- webscout/Provider/OPENAI/copilot.py +0 -305
- webscout/Provider/OPENAI/glider.py +0 -330
- webscout/Provider/OPENAI/mcpcore.py +0 -431
- webscout/Provider/OPENAI/multichat.py +0 -378
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/TTS/sthir.py +0 -94
- webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/copilot.py +0 -422
- webscout/Provider/freeaichat.py +0 -294
- webscout/Provider/koala.py +0 -182
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/monochat.py +0 -275
- webscout/Provider/multichat.py +0 -375
- webscout/Provider/scnet.py +0 -244
- webscout/Provider/talkai.py +0 -194
- /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
- /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
- /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
- /webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/refact.py (new file)

@@ -0,0 +1,274 @@
+from regex import R
+import requests
+import json
+import time
+import uuid
+from typing import List, Dict, Optional, Union, Generator, Any
+
+from webscout.Provider.Deepinfra import DeepInfra
+from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from webscout.Provider.OPENAI.utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage
+)
+try:
+    from .generate_api_key import generate_full_api_key
+except ImportError:
+    # Fallback: define the function inline if import fails
+    import random
+    import string
+
+    def generate_api_key_suffix(length: int = 4) -> str:
+        """Generate a random API key suffix like 'C1Z5'"""
+        chars = string.ascii_uppercase + string.digits
+        return ''.join(random.choice(chars) for _ in range(length))
+
+    def generate_full_api_key(prefix: str = "EU1CW20nX5oau42xBSgm") -> str:
+        """Generate a full API key with a random suffix"""
+        suffix = generate_api_key_suffix(4)
+        return prefix + suffix
+
+try:
+    from webscout.litagent import LitAgent
+except ImportError:
+    pass
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'Refact'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = 2049,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        payload = {
+            "model": model,
+            "messages": messages,
+            "max_tokens": max_tokens,
+            "stream": stream,
+        }
+        if temperature is not None:
+            payload["temperature"] = temperature
+        if top_p is not None:
+            payload["top_p"] = top_p
+        payload.update(kwargs)
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+        if stream:
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
+        else:
+            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        try:
+            response = self._client.session.post(
+                self._client.base_url,
+                headers=self._client.headers,
+                json=payload,
+                stream=True,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies
+            )
+            response.raise_for_status()
+            prompt_tokens = 0
+            completion_tokens = 0
+            total_tokens = 0
+            for line in response.iter_lines(decode_unicode=True):
+                if line:
+                    if line.startswith("data: "):
+                        json_str = line[6:]
+                        if json_str == "[DONE]":
+                            break
+                        try:
+                            data = json.loads(json_str)
+                            choice_data = data.get('choices', [{}])[0]
+                            delta_data = choice_data.get('delta', {})
+                            finish_reason = choice_data.get('finish_reason')
+                            usage_data = data.get('usage', {})
+                            if usage_data:
+                                prompt_tokens = usage_data.get('prompt_tokens', prompt_tokens)
+                                completion_tokens = usage_data.get('completion_tokens', completion_tokens)
+                                total_tokens = usage_data.get('total_tokens', total_tokens)
+                            if delta_data.get('content'):
+                                completion_tokens += 1
+                                total_tokens = prompt_tokens + completion_tokens
+                            delta = ChoiceDelta(
+                                content=delta_data.get('content'),
+                                role=delta_data.get('role'),
+                                tool_calls=delta_data.get('tool_calls')
+                            )
+                            choice = Choice(
+                                index=choice_data.get('index', 0),
+                                delta=delta,
+                                finish_reason=finish_reason,
+                                logprobs=choice_data.get('logprobs')
+                            )
+                            chunk = ChatCompletionChunk(
+                                id=request_id,
+                                choices=[choice],
+                                created=created_time,
+                                model=model,
+                                system_fingerprint=data.get('system_fingerprint')
+                            )
+                            chunk.usage = {
+                                "prompt_tokens": prompt_tokens,
+                                "completion_tokens": completion_tokens,
+                                "total_tokens": total_tokens,
+                                "estimated_cost": None
+                            }
+                            yield chunk
+                        except json.JSONDecodeError:
+                            continue
+            # Final chunk with finish_reason="stop"
+            delta = ChoiceDelta(content=None, role=None, tool_calls=None)
+            choice = Choice(index=0, delta=delta, finish_reason="stop", logprobs=None)
+            chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                system_fingerprint=None
+            )
+            chunk.usage = {
+                "prompt_tokens": prompt_tokens,
+                "completion_tokens": completion_tokens,
+                "total_tokens": total_tokens,
+                "estimated_cost": None
+            }
+            yield chunk
+        except Exception as e:
+            print(f"Error during Refact stream request: {e}")
+            raise IOError(f"Refact request failed: {e}") from e
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> ChatCompletion:
+        try:
+            response = self._client.session.post(
+                self._client.base_url,
+                headers=self._client.headers,
+                json=payload,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies
+            )
+            response.raise_for_status()
+            data = response.json()
+            choices_data = data.get('choices', [])
+            usage_data = data.get('usage', {})
+            choices = []
+            for choice_d in choices_data:
+                message_d = choice_d.get('message')
+                if not message_d and 'delta' in choice_d:
+                    delta = choice_d['delta']
+                    message_d = {
+                        'role': delta.get('role', 'assistant'),
+                        'content': delta.get('content', '')
+                    }
+                if not message_d:
+                    message_d = {'role': 'assistant', 'content': ''}
+                message = ChatCompletionMessage(
+                    role=message_d.get('role', 'assistant'),
+                    content=message_d.get('content', '')
+                )
+                choice = Choice(
+                    index=choice_d.get('index', 0),
+                    message=message,
+                    finish_reason=choice_d.get('finish_reason', 'stop')
+                )
+                choices.append(choice)
+            usage = CompletionUsage(
+                prompt_tokens=usage_data.get('prompt_tokens', 0),
+                completion_tokens=usage_data.get('completion_tokens', 0),
+                total_tokens=usage_data.get('total_tokens', 0)
+            )
+            completion = ChatCompletion(
+                id=request_id,
+                choices=choices,
+                created=created_time,
+                model=data.get('model', model),
+                usage=usage,
+            )
+            return completion
+        except Exception as e:
+            print(f"Error during Refact non-stream request: {e}")
+            raise IOError(f"Refact request failed: {e}") from e
+
+class Chat(BaseChat):
+    def __init__(self, client: 'Refact'):
+        self.completions = Completions(client)
+
+class Refact(OpenAICompatibleProvider):
+    AVAILABLE_MODELS = [
+        "gpt-4o",
+        "gpt-4o-mini",
+        "o4-mini",
+        "gpt-4.1",
+        "gpt-4.1-mini",
+        "gpt-4.1-nano",
+        "gpt-5",
+        "gpt-5-mini",
+        "gpt-5-nano",
+        "claude-sonnet-4",
+        "claude-opus-4",
+        "claude-opus-4.1",
+        "gemini-2.5-pro",
+        "gemini-2.5-pro-preview"
+    ]
+
+    def __init__(self, api_key: str = None, browser: str = "chrome"):
+        # Mirror DeepInfra constructor signature but use the lightweight headers from lol.py
+        self.timeout = None
+        self.base_url = "https://inference.smallcloud.ai/v1/chat/completions"
+        self.session = requests.Session()
+
+        # Use minimal headers consistent with lol.py
+        self.headers = {
+            "Content-Type": "application/json",
+            "User-Agent": "refact-lsp 0.10.19",
+        }
+
+        if api_key:
+            self.headers["Authorization"] = f"Bearer {api_key}"
+        else:
+            self.headers["Authorization"] = f"Bearer {generate_full_api_key()}"
+
+        # Try to initialize LitAgent for compatibility, but do not alter headers (keep lol.py style)
+        try:
+            _ = LitAgent()
+        except Exception:
+            pass
+
+        self.session.headers.update(self.headers)
+        self.chat = Chat(self)
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
+
+if __name__ == "__main__":
+    client = Refact()
+    response = client.chat.completions.create(
+        model="claude-opus-4.1",
+        messages=[{"role": "user", "content": "Hello, how are you?"}],
+        max_tokens=10000,
+        stream=True
+    )
+    for chunk in response:
+        if chunk.choices[0].delta.content:
+            print(chunk.choices[0].delta.content, end='', flush=True)
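The new Refact provider plugs into webscout's OpenAI-compatible interface: it posts to https://inference.smallcloud.ai/v1/chat/completions with a refact-lsp User-Agent and, when no key is supplied, a self-generated bearer token (the fixed prefix plus a random 4-character suffix, per the fallback above). The file's own __main__ block demonstrates streaming; below is a minimal non-streaming sketch, assuming only what the code above defines (module path taken from the file list):

    from webscout.Provider.OPENAI.refact import Refact

    client = Refact()  # no api_key given: falls back to generate_full_api_key() (prefix + 4 random chars)
    completion = client.chat.completions.create(
        model="gpt-4o-mini",  # one of Refact.AVAILABLE_MODELS
        messages=[{"role": "user", "content": "Say hi in five words."}],
        stream=False,  # the non-stream path returns a ChatCompletion
    )
    print(completion.choices[0].message.content)
    print(completion.usage)  # CompletionUsage parsed from the server's usage block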
webscout/Provider/OPENAI/scirachat.py

@@ -336,6 +336,9 @@ class SciraChat(OpenAICompatibleProvider):
         "o3": "scira-o3",
         "qwen/qwen3-32b": "scira-qwen-32b",
         "qwen3-30b-a3b": "scira-qwen-30b",
+        "qwen3-4b": "scira-qwen-4b",
+        "qwen3-32b": "scira-qwen-32b",
+        "qwen3-4b-thinking": "scira-qwen-4b-thinking",
         "deepseek-v3-0324": "scira-deepseek-v3",
         "claude-3-5-haiku-20241022": "scira-haiku",
         "mistral-small-latest": "scira-mistral",
@@ -346,6 +349,7 @@ class SciraChat(OpenAICompatibleProvider):
         "claude-sonnet-4-20250514-thinking": "scira-anthropic-thinking",
         "claude-4-opus-20250514": "scira-opus",
         "claude-4-opus-20250514-pro": "scira-opus-pro",
+        "llama-4-maverick": "scira-llama-4",
         "meta-llama/llama-4-maverick-17b-128e-instruct": "scira-llama-4",
         "kimi-k2-instruct": "scira-kimi-k2",
         "scira-kimi-k2": "kimi-k2-instruct",
@@ -360,6 +364,8 @@ class SciraChat(OpenAICompatibleProvider):
     SCIRA_TO_MODEL["scira-nano"] = "gpt-4.1-nano"
     SCIRA_TO_MODEL["scira-qwen-32b"] = "qwen/qwen3-32b"
     SCIRA_TO_MODEL["scira-qwen-30b"] = "qwen3-30b-a3b"
+    SCIRA_TO_MODEL["scira-qwen-4b"] = "qwen3-4b"
+    SCIRA_TO_MODEL["scira-qwen-4b-thinking"] = "qwen3-4b-thinking"
     SCIRA_TO_MODEL["scira-deepseek-v3"] = "deepseek-v3-0324"
     SCIRA_TO_MODEL["scira-grok-4"] = "grok-4"
     SCIRA_TO_MODEL["scira-kimi-k2"] = "kimi-k2-instruct"
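These scirachat.py hunks extend both alias tables: a forward map from upstream model names to scira-* ids (the dict literal in the first two sub-hunks; its name is not visible in this diff) and the reverse SCIRA_TO_MODEL map. A minimal sketch of how such a dict pair resolves either spelling of a model name; MODEL_TO_SCIRA is a stand-in name and resolve_scira_id is a hypothetical helper for illustration, not webscout API:

    # Illustrative subset of the two tables patched above.
    MODEL_TO_SCIRA = {"qwen3-4b": "scira-qwen-4b", "qwen3-4b-thinking": "scira-qwen-4b-thinking"}
    SCIRA_TO_MODEL = {"scira-qwen-4b": "qwen3-4b", "scira-qwen-4b-thinking": "qwen3-4b-thinking"}

    def resolve_scira_id(name: str) -> str:
        """Accept a friendly model name or a scira-* id; return the scira-* id."""
        if name in MODEL_TO_SCIRA:
            return MODEL_TO_SCIRA[name]
        if name in SCIRA_TO_MODEL:  # already a scira-* id
            return name
        raise ValueError(f"unknown model alias: {name}")

    assert resolve_scira_id("qwen3-4b") == "scira-qwen-4b"
    assert resolve_scira_id("scira-qwen-4b") == "scira-qwen-4b"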
webscout/Provider/OPENAI/textpollinations.py

@@ -276,32 +276,21 @@ class TextPollinations(OpenAICompatibleProvider):
     """

     AVAILABLE_MODELS = [
-        "deepseek",
         "deepseek-reasoning",
-        "
-        "grok",
-        "llama-fast-roblox",
-        "llama-roblox",
-        "llamascout",
+        "gemini",
         "mistral",
-        "
-        "mistral-roblox",
+        "nova-fast",
         "openai",
         "openai-audio",
         "openai-fast",
-        "openai-large",
         "openai-reasoning",
-        "openai-roblox",
-        "phi",
         "qwen-coder",
+        "roblox-rp",
         "bidara",
-        "elixposearch",
         "evil",
-        "hypnosis-tracy",
         "midijourney",
         "mirexa",
         "rtist",
-        "sur",
         "unity",
     ]

webscout/Provider/OPENAI/toolbaz.py

@@ -291,26 +291,30 @@ class Toolbaz(OpenAICompatibleProvider):

     AVAILABLE_MODELS = [
         "gemini-2.5-flash",
+        "gemini-2.5-pro",
         "gemini-2.0-flash-thinking",
-        "sonar",
         "gemini-2.0-flash",
-
+
+        "claude-sonnet-4",
+
+        "gpt-5",
+        "gpt-oss-120b",
         "o3-mini",
         "gpt-4o-latest",
-
+
+        "toolbaz_v4",
+        "toolbaz_v3.5_pro",
+
         "deepseek-r1",
+        "deepseek-v3.1",
+        "deepseek-v3",
+
         "Llama-4-Maverick",
-        "Llama-4-Scout",
         "Llama-3.3-70B",
-
-        "grok-2-1212",
-        "grok-3-beta",
-        "toolbaz_v3.5_pro",
-        "toolbaz_v3",
+
         "mixtral_8x22b",
         "L3-70B-Euryale-v2.1",
         "midnight-rose",
-        "unity",
         "unfiltered_x"
     ]

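The textpollinations.py and toolbaz.py hunks only churn the providers' AVAILABLE_MODELS allow-lists (TextPollinations drops the roblox, phi, and grok entries; Toolbaz picks up gemini-2.5-pro, claude-sonnet-4, gpt-5, and the deepseek-v3 line). A short sketch of the gatekeeping such lists typically feed; the explicit membership check is an assumption about calling code, not a hunk from this diff:

    from webscout.Provider.OPENAI.toolbaz import Toolbaz

    requested = "gpt-5"  # newly allowed in 8.3.7
    if requested not in Toolbaz.AVAILABLE_MODELS:  # assumed validation pattern
        raise ValueError(f"{requested!r} is not supported by Toolbaz")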