webscout 8.2.9__py3-none-any.whl → 8.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of webscout might be problematic.
- webscout/AIauto.py +6 -6
- webscout/AIbase.py +61 -1
- webscout/Extra/YTToolkit/ytapi/patterns.py +45 -45
- webscout/Extra/YTToolkit/ytapi/stream.py +1 -1
- webscout/Extra/YTToolkit/ytapi/video.py +10 -10
- webscout/Extra/autocoder/autocoder_utiles.py +1 -1
- webscout/Litlogger/formats.py +9 -0
- webscout/Litlogger/handlers.py +18 -0
- webscout/Litlogger/logger.py +43 -1
- webscout/Provider/AISEARCH/scira_search.py +3 -2
- webscout/Provider/Blackboxai.py +2 -0
- webscout/Provider/ChatSandbox.py +2 -1
- webscout/Provider/Deepinfra.py +1 -1
- webscout/Provider/HeckAI.py +1 -1
- webscout/Provider/LambdaChat.py +8 -1
- webscout/Provider/MCPCore.py +7 -3
- webscout/Provider/OPENAI/BLACKBOXAI.py +396 -113
- webscout/Provider/OPENAI/Cloudflare.py +31 -14
- webscout/Provider/OPENAI/FalconH1.py +457 -0
- webscout/Provider/OPENAI/FreeGemini.py +29 -13
- webscout/Provider/OPENAI/NEMOTRON.py +26 -14
- webscout/Provider/OPENAI/PI.py +427 -0
- webscout/Provider/OPENAI/Qwen3.py +161 -140
- webscout/Provider/OPENAI/README.md +3 -0
- webscout/Provider/OPENAI/TogetherAI.py +355 -0
- webscout/Provider/OPENAI/TwoAI.py +29 -12
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +33 -23
- webscout/Provider/OPENAI/api.py +375 -24
- webscout/Provider/OPENAI/autoproxy.py +39 -0
- webscout/Provider/OPENAI/base.py +91 -12
- webscout/Provider/OPENAI/c4ai.py +31 -10
- webscout/Provider/OPENAI/chatgpt.py +56 -24
- webscout/Provider/OPENAI/chatgptclone.py +46 -16
- webscout/Provider/OPENAI/chatsandbox.py +7 -3
- webscout/Provider/OPENAI/copilot.py +26 -10
- webscout/Provider/OPENAI/deepinfra.py +29 -12
- webscout/Provider/OPENAI/e2b.py +358 -158
- webscout/Provider/OPENAI/exaai.py +13 -10
- webscout/Provider/OPENAI/exachat.py +10 -6
- webscout/Provider/OPENAI/flowith.py +7 -3
- webscout/Provider/OPENAI/freeaichat.py +10 -6
- webscout/Provider/OPENAI/glider.py +10 -6
- webscout/Provider/OPENAI/heckai.py +11 -8
- webscout/Provider/OPENAI/llmchatco.py +9 -7
- webscout/Provider/OPENAI/mcpcore.py +10 -7
- webscout/Provider/OPENAI/multichat.py +3 -1
- webscout/Provider/OPENAI/netwrck.py +10 -6
- webscout/Provider/OPENAI/oivscode.py +12 -9
- webscout/Provider/OPENAI/opkfc.py +31 -8
- webscout/Provider/OPENAI/scirachat.py +17 -10
- webscout/Provider/OPENAI/sonus.py +10 -6
- webscout/Provider/OPENAI/standardinput.py +18 -9
- webscout/Provider/OPENAI/textpollinations.py +14 -7
- webscout/Provider/OPENAI/toolbaz.py +16 -11
- webscout/Provider/OPENAI/typefully.py +14 -7
- webscout/Provider/OPENAI/typegpt.py +10 -6
- webscout/Provider/OPENAI/uncovrAI.py +22 -8
- webscout/Provider/OPENAI/venice.py +10 -6
- webscout/Provider/OPENAI/writecream.py +13 -10
- webscout/Provider/OPENAI/x0gpt.py +11 -9
- webscout/Provider/OPENAI/yep.py +12 -10
- webscout/Provider/PI.py +2 -1
- webscout/Provider/STT/__init__.py +3 -0
- webscout/Provider/STT/base.py +281 -0
- webscout/Provider/STT/elevenlabs.py +265 -0
- webscout/Provider/TTI/__init__.py +3 -1
- webscout/Provider/TTI/aiarta.py +399 -365
- webscout/Provider/TTI/base.py +74 -2
- webscout/Provider/TTI/fastflux.py +63 -30
- webscout/Provider/TTI/gpt1image.py +149 -0
- webscout/Provider/TTI/imagen.py +196 -0
- webscout/Provider/TTI/magicstudio.py +60 -29
- webscout/Provider/TTI/piclumen.py +43 -32
- webscout/Provider/TTI/pixelmuse.py +232 -225
- webscout/Provider/TTI/pollinations.py +43 -32
- webscout/Provider/TTI/together.py +287 -0
- webscout/Provider/TTI/utils.py +2 -1
- webscout/Provider/TTS/README.md +1 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/freetts.py +140 -0
- webscout/Provider/UNFINISHED/ChutesAI.py +314 -0
- webscout/Provider/UNFINISHED/fetch_together_models.py +95 -0
- webscout/Provider/__init__.py +3 -2
- webscout/Provider/granite.py +41 -6
- webscout/Provider/oivscode.py +37 -37
- webscout/Provider/scira_chat.py +3 -2
- webscout/Provider/scnet.py +1 -0
- webscout/Provider/toolbaz.py +0 -1
- webscout/litagent/Readme.md +12 -3
- webscout/litagent/agent.py +99 -62
- webscout/version.py +1 -1
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/METADATA +2 -1
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/RECORD +98 -87
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/WHEEL +1 -1
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/TTI/artbit.py +0 -0
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/top_level.txt +0 -0
webscout/Provider/UNFINISHED/ChutesAI.py
ADDED
@@ -0,0 +1,314 @@
```python
import requests
import json
import time
import uuid
from typing import List, Dict, Optional, Union, Generator, Any
import re
import random
import string
from rich import print
from webscout.litagent.agent import LitAgent
import cloudscraper
# Import base classes and utility structures
from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
from webscout.Provider.OPENAI.utils import (
    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
    ChatCompletionMessage, CompletionUsage
)

# --- ChutesAI API Key Auto-Generator ---
def generate_chutesai_api_key():
    url = "https://chutes.ai/auth/start?/create"
    def generate_username(length=8):
        return ''.join(random.choices(string.ascii_letters, k=length))
    username = generate_username()
    agent = LitAgent()
    fingerprint = agent.generate_fingerprint("chrome")
    headers = {
        "Content-Type": "application/x-www-form-urlencoded",
        "Accept": "application/json",
        "User-Agent": fingerprint["user_agent"],
        "Accept-Encoding": "gzip, deflate, br, zstd",
        "Accept-Language": fingerprint["accept_language"],
        "DNT": "1",
        "Origin": "https://chutes.ai",
        "Referer": "https://chutes.ai/auth/start",
        "Sec-Ch-Ua": fingerprint["sec_ch_ua"],
        "Sec-Ch-Ua-Mobile": "?0",
        "Sec-Ch-Ua-Platform": fingerprint["platform"],
        "X-Sveltekit-Action": "true"
    }
    data = {
        "username": username,
        "coldkey": "hotkey",
        "__superform_id": "xpsmbd"
    }
    scraper = cloudscraper.create_scraper()
    response = scraper.post(url, headers=headers, data=data)
    print(f"[bold green]Status:[/] {response.status_code}")

    # Ensure response is decoded as UTF-8
    response.encoding = 'utf-8'

    try:
        resp_json = response.json()
    except Exception:
        try:
            # Try to decode the response text with UTF-8 explicitly
            decoded_text = response.content.decode('utf-8', errors='replace')
            print(decoded_text)
        except Exception:
            print("Failed to decode response content")
        return None
    print(resp_json)
    # Extract the api_key using regex from the 'data' field
    if 'data' in resp_json:
        api_key_match = re.search(r'(cpk_[a-zA-Z0-9.]+)', resp_json['data'])
        if api_key_match:
            api_key = api_key_match.group(1)
            print(f"[bold yellow]Auto-generated ChutesAI API Key:[/] {api_key}")
            return api_key
        else:
            print("[red]API key not found in response data.")
            return None

# --- ChutesAI Client ---

class Completions(BaseCompletions):
    def __init__(self, client: 'ChutesAI'):
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = 1024,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Creates a model response for the given chat conversation.
        Mimics openai.chat.completions.create
        """
        payload = {
            "model": model,
            "messages": messages,
            "max_tokens": max_tokens,
            "stream": stream,
        }
        if temperature is not None:
            payload["temperature"] = temperature
        if top_p is not None:
            payload["top_p"] = top_p
        payload.update(kwargs)

        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        if stream:
            return self._create_stream(request_id, created_time, model, payload)
        else:
            return self._create_non_stream(request_id, created_time, model, payload)

    def _create_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
    ) -> Generator[ChatCompletionChunk, None, None]:
        try:
            response = self._client.scraper.post(
                self._client.base_url,
                headers=self._client.headers,
                json=payload,
                stream=True,
                timeout=self._client.timeout
            )
            response.raise_for_status()

            prompt_tokens = 0
            completion_tokens = 0
            total_tokens = 0

            for line in response.iter_lines():
                if line:
                    decoded_line = line.decode('utf-8', errors='replace').strip()
                    if decoded_line.startswith("data: "):
                        json_str = decoded_line[6:]
                        if json_str == "[DONE]":
                            break
                        try:
                            data = json.loads(json_str)
                            choice_data = data.get('choices', [{}])[0]
                            delta_data = choice_data.get('delta', {})
                            finish_reason = choice_data.get('finish_reason')

                            usage_data = data.get('usage', {})
                            if usage_data:
                                prompt_tokens = usage_data.get('prompt_tokens', prompt_tokens)
                                completion_tokens = usage_data.get('completion_tokens', completion_tokens)
                                total_tokens = usage_data.get('total_tokens', total_tokens)

                            delta = ChoiceDelta(
                                content=delta_data.get('content'),
                                role=delta_data.get('role'),
                                tool_calls=delta_data.get('tool_calls')
                            )
                            choice = Choice(
                                index=choice_data.get('index', 0),
                                delta=delta,
                                finish_reason=finish_reason,
                                logprobs=choice_data.get('logprobs')
                            )
                            chunk = ChatCompletionChunk(
                                id=request_id,
                                choices=[choice],
                                created=created_time,
                                model=model,
                                system_fingerprint=data.get('system_fingerprint')
                            )
                            if hasattr(chunk, "model_dump"):
                                chunk_dict = chunk.model_dump(exclude_none=True)
                            else:
                                chunk_dict = chunk.dict(exclude_none=True)
                            usage_dict = {
                                "prompt_tokens": prompt_tokens or 10,
                                "completion_tokens": completion_tokens or (len(delta_data.get('content', '')) if delta_data.get('content') else 0),
                                "total_tokens": total_tokens or (10 + (len(delta_data.get('content', '')) if delta_data.get('content') else 0)),
                                "estimated_cost": None
                            }
                            if delta_data.get('content'):
                                completion_tokens += 1
                                total_tokens = prompt_tokens + completion_tokens
                                usage_dict["completion_tokens"] = completion_tokens
                                usage_dict["total_tokens"] = total_tokens
                            chunk_dict["usage"] = usage_dict
                            yield chunk
                        except json.JSONDecodeError:
                            print(f"Warning: Could not decode JSON line: {json_str}")
                            continue
        except requests.exceptions.RequestException as e:
            print(f"Error during ChutesAI stream request: {e}")
            raise IOError(f"ChutesAI request failed: {e}") from e
        except Exception as e:
            print(f"Error processing ChutesAI stream: {e}")
            raise

    def _create_non_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
    ) -> ChatCompletion:
        try:
            response = self._client.scraper.post(
                self._client.base_url,
                headers=self._client.headers,
                json=payload,
                timeout=self._client.timeout
            )
            response.raise_for_status()
            data = response.json()
            choices_data = data.get('choices', [])
            usage_data = data.get('usage', {})
            choices = []
            for choice_d in choices_data:
                message_d = choice_d.get('message', {})
                message = ChatCompletionMessage(
                    role=message_d.get('role', 'assistant'),
                    content=message_d.get('content', '')
                )
                choice = Choice(
                    index=choice_d.get('index', 0),
                    message=message,
                    finish_reason=choice_d.get('finish_reason', 'stop')
                )
                choices.append(choice)
            usage = CompletionUsage(
                prompt_tokens=usage_data.get('prompt_tokens', 0),
                completion_tokens=usage_data.get('completion_tokens', 0),
                total_tokens=usage_data.get('total_tokens', 0)
            )
            completion = ChatCompletion(
                id=request_id,
                choices=choices,
                created=created_time,
                model=data.get('model', model),
                usage=usage,
            )
            return completion
        except requests.exceptions.RequestException as e:
            print(f"Error during ChutesAI non-stream request: {e}")
            raise IOError(f"ChutesAI request failed: {e}") from e
        except Exception as e:
            print(f"Error processing ChutesAI response: {e}")
            raise

class Chat(BaseChat):
    def __init__(self, client: 'ChutesAI'):
        self.completions = Completions(client)

class ChutesAI(OpenAICompatibleProvider):
    AVAILABLE_MODELS = [
        "deepseek-ai/DeepSeek-V3-0324",
        "deepseek-ai/DeepSeek-R1",
        "NousResearch/DeepHermes-3-Mistral-24B-Preview",
        "chutesai/Llama-4-Maverick-17B-128E-Instruct-FP8",
    ]
    def __init__(self, api_key: str = None,):
        self.timeout = None  # Infinite timeout
        self.base_url = "https://llm.chutes.ai/v1/chat/completions"

        # Always generate a new API key, ignore any provided key
        print("[yellow]Generating new ChutesAI API key...[/]")
        self.api_key = generate_chutesai_api_key()

        if not self.api_key:
            print("[red]Failed to generate API key. Retrying...[/]")
            # Retry once more
            self.api_key = generate_chutesai_api_key()

        if not self.api_key:
            raise ValueError("Failed to generate ChutesAI API key after multiple attempts.")

        print(f"[green]Successfully generated API key: {self.api_key[:20]}...[/]")

        self.scraper = cloudscraper.create_scraper()
        self.headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }
        self.scraper.headers.update(self.headers)
        self.chat = Chat(self)

    @property
    def models(self):
        class _ModelList:
            def list(inner_self):
                return type(self).AVAILABLE_MODELS
        return _ModelList()

if __name__ == "__main__":
    try:
        # Example usage - always use generated API key
        client = ChutesAI()

        messages = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "What is the capital of France?"}
        ]

        print("[cyan]Making API request...[/]")
        response = client.chat.completions.create(
            model="deepseek-ai/DeepSeek-V3-0324",
            messages=messages,
            max_tokens=50,
            stream=True
        )
        for chunk in response:
            if hasattr(chunk, "model_dump"):
                chunk_dict = chunk.model_dump(exclude_none=True)
            else:
                chunk_dict = chunk.dict(exclude_none=True)
            print(f"[green]Response Chunk:[/] {chunk_dict}")

    except Exception as e:
        print(f"[red]Error: {e}[/]")
        print("[yellow]If the issue persists, the ChutesAI service might be down or the API key generation method needs updating.[/]")
```
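The file's `__main__` block above exercises the streaming path. For completeness, a non-streaming call would look like the sketch below; the import path follows the file's location in the wheel, and the auto-generated `cpk_` key behavior comes from the `__init__` shown above:

```python
from webscout.Provider.UNFINISHED.ChutesAI import ChutesAI

client = ChutesAI()  # ignores any passed key; auto-generates a cpk_... key on init
completion = client.chat.completions.create(
    model="deepseek-ai/DeepSeek-R1",
    messages=[{"role": "user", "content": "Say hi"}],
    stream=False,  # routes to _create_non_stream, returns a ChatCompletion
)
print(completion.choices[0].message.content)
```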
webscout/Provider/UNFINISHED/fetch_together_models.py
ADDED
@@ -0,0 +1,95 @@
```python
import requests
import json

def fetch_together_models():
    """Fetch models from Together.xyz API"""
    api_key = "56c8eeff9971269d7a7e625ff88e8a83a34a556003a5c87c289ebe9a3d8a3d2c"
    endpoint = "https://api.together.xyz/v1/models"

    headers = {
        "Authorization": f"Bearer {api_key}",
        "Accept": "application/json"
    }

    try:
        response = requests.get(endpoint, headers=headers, timeout=30)
        response.raise_for_status()

        models_data = response.json()

        # Extract and categorize models
        chat_models = []
        image_models = []
        language_models = []
        all_models = []

        print(f"Total models found: {len(models_data)}")
        print("\n" + "="*80)

        for model in models_data:
            if isinstance(model, dict):
                model_id = model.get("id", "")
                model_type = model.get("type", "").lower()
                context_length = model.get("context_length", 0)

                if not model_id:
                    continue

                all_models.append(model_id)

                # Categorize by type
                if model_type == "chat":
                    chat_models.append(model_id)
                elif model_type == "image":
                    image_models.append(model_id)
                elif model_type == "language":
                    language_models.append(model_id)

                # Print model details
                print(f"Model: {model_id}")
                print(f" Type: {model_type}")
                print(f" Context Length: {context_length}")
                if model.get("config"):
                    config = model["config"]
                    if config.get("stop"):
                        print(f" Stop Tokens: {config['stop']}")
                print("-" * 40)

        print(f"\nSUMMARY:")
        print(f"Chat Models: {len(chat_models)}")
        print(f"Image Models: {len(image_models)}")
        print(f"Language Models: {len(language_models)}")
        print(f"Total Models: {len(all_models)}")

        # Generate Python list for code
        print("\n" + "="*80)
        print("AVAILABLE_MODELS = [")
        for model in sorted(all_models):
            print(f' "{model}",')
        print("]")

        return {
            "all_models": all_models,
            "chat_models": chat_models,
            "image_models": image_models,
            "language_models": language_models,
            "raw_data": models_data
        }

    except requests.exceptions.RequestException as e:
        print(f"Error fetching models: {e}")
        return None
    except json.JSONDecodeError as e:
        print(f"Error parsing JSON response: {e}")
        return None

if __name__ == "__main__":
    result = fetch_together_models()

    if result:
        print(f"\n📊 Successfully fetched {len(result['all_models'])} models from Together.xyz")

        # Save to file
        with open("together_models.json", "w") as f:
            json.dump(result, f, indent=2)
        print("✅ Results saved to together_models.json")
```
webscout/Provider/__init__.py
CHANGED
```diff
@@ -42,7 +42,6 @@ from .chatglm import *
 from .hermes import *
 from .TextPollinationsAI import *
 from .Glider import *
-from .ChatGPTGratis import *
 from .QwenLM import *
 from .granite import *
 from .WiseCat import *
@@ -84,6 +83,9 @@ from .Flowith import Flowith
 from .samurai import samurai
 from .lmarena import lmarena
 from .oivscode import oivscode
+
+# Import STT providers
+from .STT import *
 __all__ = [
     'SCNet',
     'oivscode',
@@ -111,7 +113,6 @@ __all__ = [
     'WiseCat',
     'IBMGranite',
     'QwenLM',
-    'ChatGPTGratis',
     'LambdaChat',
     'TextPollinationsAI',
     'GliderAI',
```
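In practice the new wildcard import means the STT subpackage (added in this release: `base.py`, `elevenlabs.py`) rides along with the package root; a sketch for inspecting what it actually re-exports:

```python
# The subpackage itself is importable; its public names depend on
# webscout/Provider/STT/__init__.py, which this diff does not show in full.
from webscout.Provider import STT

print(getattr(STT, "__all__", dir(STT)))
```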
webscout/Provider/granite.py
CHANGED
```diff
@@ -14,11 +14,11 @@ class IBMGranite(Provider):
     using Lit agent for the user agent.
     """
 
-    AVAILABLE_MODELS = ["granite-3-8b-instruct", "granite-3-2-8b-instruct"]
+    AVAILABLE_MODELS = ["granite-3-8b-instruct", "granite-3-2-8b-instruct", "granite-3-3-8b-instruct"]
 
     def __init__(
         self,
-        api_key: str,
+        api_key: str = None,
         is_conversation: bool = True,
         max_tokens: int = 600, # Note: max_tokens is not used by this API
         timeout: int = 30,
@@ -28,7 +28,7 @@ class IBMGranite(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "granite-3-
+        model: str = "granite-3-3-8b-instruct",
         system_prompt: str = "You are a helpful AI assistant.",
         thinking: bool = False,
     ):
@@ -36,6 +36,10 @@ class IBMGranite(Provider):
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
+        # Auto-generate API key if not provided or empty
+        if not api_key:
+            api_key = self.generate_api_key()
+
         # Initialize curl_cffi Session
         self.session = Session()
         self.is_conversation = is_conversation
@@ -55,6 +59,7 @@ class IBMGranite(Provider):
             "content-type": "application/json",
             "origin": "https://www.ibm.com", # Keep origin
             "referer": "https://www.ibm.com/", # Keep referer
+            "User-Agent": Lit().random(),
         }
         self.headers["Authorization"] = f"Bearer {api_key}"
 
@@ -84,6 +89,36 @@ class IBMGranite(Provider):
                 return chunk[1]
         return None
 
+    @staticmethod
+    def generate_api_key() -> str:
+        """
+        Auto-generate an API key (sessionId) by making a GET request to the Granite auth endpoint.
+        Returns:
+            str: The sessionId to be used as the API key.
+        Raises:
+            Exception: If the sessionId cannot be retrieved.
+        """
+        session = Session()
+        headers = {
+            "User-Agent": Lit().random(),
+            "Origin": "https://www.ibm.com",
+            "Referer": "https://d18n68ssusgr7r.cloudfront.net/",
+            "Accept": "application/json,application/jsonl",
+        }
+        session.headers.update(headers)
+        url = "https://d18n68ssusgr7r.cloudfront.net/v1/auth"
+        resp = session.get(url, timeout=15, impersonate="chrome110")
+        if resp.status_code != 200:
+            raise Exception(f"Failed to get Granite API key: {resp.status_code} - {resp.text}")
+        try:
+            data = resp.json()
+            session_id = data.get("sessionId")
+            if not session_id:
+                raise Exception(f"No sessionId in Granite auth response: {data}")
+            return session_id
+        except Exception as e:
+            raise Exception(f"Failed to parse Granite auth response: {e}")
+
     def ask(
         self,
         prompt: str,
@@ -117,8 +152,9 @@ class IBMGranite(Provider):
                 {"role": "system", "content": self.system_prompt},
                 {"role": "user", "content": conversation_prompt},
             ],
-            "stream": True # API seems to require stream=True based on response format
         }
+        if self.thinking:
+            payload["thinking"] = True
 
         def for_stream():
             streaming_text = "" # Initialize outside try block
@@ -227,9 +263,8 @@ if __name__ == "__main__":
     from rich import print
     # Example usage: Initialize without logging.
     ai = IBMGranite(
-        api_key="", # press f12 to see the API key
         thinking=True,
     )
-    response = ai.chat("
+    response = ai.chat("How many r in strawberry", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
```
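Taken together, these changes make the provider keyless by default; a sketch of the new flow, based on the updated `__main__` block and `generate_api_key()` above:

```python
from webscout.Provider.granite import IBMGranite

# api_key defaults to None; __init__ then calls generate_api_key(), which
# fetches a sessionId from the CloudFront /v1/auth endpoint shown above.
ai = IBMGranite(model="granite-3-3-8b-instruct", thinking=True)
for chunk in ai.chat("How many r in strawberry", stream=True):
    print(chunk, end="", flush=True)
```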
webscout/Provider/oivscode.py
CHANGED
The hunk swaps 37 blank lines (old lines 265-301) for a new fetch_available_models method:
```diff
@@ -262,43 +262,43 @@ class oivscode(Provider):
             return response["text"]
         return ""
 
+    def fetch_available_models(self):
+        """Fetches available models from the /models endpoint of all API endpoints and prints models per endpoint."""
+        endpoints = self.api_endpoints.copy()
+        random.shuffle(endpoints)
+        results = {}
+        errors = []
+        for endpoint in endpoints:
+            models_url = endpoint.replace('/v1/chat/completions', '/v1/models')
+            try:
+                response = self.session.get(models_url, timeout=self.timeout)
+                if response.ok:
+                    data = response.json()
+                    if isinstance(data, dict) and "data" in data:
+                        models = [m["id"] if isinstance(m, dict) and "id" in m else m for m in data["data"]]
+                    elif isinstance(data, list):
+                        models = data
+                    else:
+                        models = list(data.keys()) if isinstance(data, dict) else []
+                    results[models_url] = models
+                else:
+                    errors.append(f"Failed to fetch models from {models_url}: {response.status_code} {response.text}")
+            except Exception as e:
+                errors.append(f"Error fetching from {models_url}: {e}")
+        if results:
+            for url, models in results.items():
+                print(f"Models from {url}:")
+                if models:
+                    for m in sorted(models):
+                        print(f" {m}")
+                else:
+                    print(" No models found.")
+            return results
+        else:
+            print("No models found from any endpoint.")
+            for err in errors:
+                print(err)
+            return {}
 
 if __name__ == "__main__":
     from rich import print
```
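A sketch of calling the new helper (constructor arguments beyond the defaults are an assumption; the method both prints per-endpoint results and returns them):

```python
from webscout.Provider.oivscode import oivscode

bot = oivscode()
models_by_endpoint = bot.fetch_available_models()  # {models_url: [model ids]}, or {} on total failure
```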
webscout/Provider/scira_chat.py
CHANGED
```diff
@@ -20,12 +20,13 @@ class SciraAI(Provider):
     AVAILABLE_MODELS = {
         "scira-default": "Grok3-mini", # thinking model
         "scira-grok-3": "Grok3",
-        "scira-anthropic": "
+        "scira-anthropic": "Claude 4 Sonnet",
+        "scira-anthropic-thinking": "Claude 4 Sonnet Thinking", # thinking model
         "scira-vision" : "Grok2-Vision", # vision model
         "scira-4o": "GPT4o",
         "scira-qwq": "QWQ-32B",
         "scira-o4-mini": "o4-mini",
-        "scira-google": "gemini 2.5 flash",
+        "scira-google": "gemini 2.5 flash Thinking", # thinking model
         "scira-google-pro": "gemini 2.5 pro",
         "scira-llama-4": "llama 4 Maverick",
     }
```
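The new alias should be selectable like any other AVAILABLE_MODELS key; a sketch, where the `model` keyword is an assumption based on the constructor pattern used elsewhere in the package:

```python
from webscout.Provider.scira_chat import SciraAI

ai = SciraAI(model="scira-anthropic-thinking")  # maps to "Claude 4 Sonnet Thinking"
```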
webscout/Provider/scnet.py
CHANGED
```diff
@@ -18,6 +18,7 @@ class SCNet(Provider):
         {"modelId": 5, "name": "Deepseek-r1-70B"},
         {"modelId": 7, "name": "QWQ-32B"},
         {"modelId": 8, "name": "minimax-text-01-456B"},
+        {"modelId": 9, "name": "Qwen3-30B-A3B"}, # Added new model
         # Add more models here as needed
     ]
     MODEL_NAME_TO_ID = {m["name"]: m["modelId"] for m in AVAILABLE_MODELS}
```