webscout 8.2.6__py3-none-any.whl → 8.2.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout has been flagged as potentially problematic.
- webscout/AIauto.py +1 -1
- webscout/AIutel.py +298 -239
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +92 -19
- webscout/Extra/autocoder/autocoder.py +309 -114
- webscout/Extra/autocoder/autocoder_utiles.py +15 -15
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/weather.md +281 -0
- webscout/Litlogger/Readme.md +175 -0
- webscout/Provider/AISEARCH/DeepFind.py +41 -37
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +0 -1
- webscout/Provider/AISEARCH/genspark_search.py +228 -86
- webscout/Provider/AISEARCH/hika_search.py +11 -11
- webscout/Provider/AISEARCH/scira_search.py +324 -322
- webscout/Provider/AllenAI.py +7 -14
- webscout/Provider/Blackboxai.py +518 -74
- webscout/Provider/Cloudflare.py +0 -1
- webscout/Provider/Deepinfra.py +23 -21
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/GizAI.py +15 -5
- webscout/Provider/Glider.py +11 -8
- webscout/Provider/HeckAI.py +80 -52
- webscout/Provider/Koboldai.py +7 -4
- webscout/Provider/LambdaChat.py +2 -2
- webscout/Provider/Marcus.py +10 -18
- webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +282 -0
- webscout/Provider/OPENAI/NEMOTRON.py +244 -0
- webscout/Provider/OPENAI/README.md +1253 -0
- webscout/Provider/OPENAI/__init__.py +8 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -286
- webscout/Provider/OPENAI/api.py +810 -0
- webscout/Provider/OPENAI/base.py +217 -14
- webscout/Provider/OPENAI/c4ai.py +373 -367
- webscout/Provider/OPENAI/chatgpt.py +7 -0
- webscout/Provider/OPENAI/chatgptclone.py +7 -0
- webscout/Provider/OPENAI/chatsandbox.py +172 -0
- webscout/Provider/OPENAI/deepinfra.py +30 -20
- webscout/Provider/OPENAI/e2b.py +6 -0
- webscout/Provider/OPENAI/exaai.py +7 -0
- webscout/Provider/OPENAI/exachat.py +6 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -352
- webscout/Provider/OPENAI/glider.py +323 -316
- webscout/Provider/OPENAI/groq.py +361 -354
- webscout/Provider/OPENAI/heckai.py +30 -64
- webscout/Provider/OPENAI/llmchatco.py +8 -0
- webscout/Provider/OPENAI/mcpcore.py +7 -0
- webscout/Provider/OPENAI/multichat.py +8 -0
- webscout/Provider/OPENAI/netwrck.py +356 -350
- webscout/Provider/OPENAI/opkfc.py +8 -0
- webscout/Provider/OPENAI/scirachat.py +471 -462
- webscout/Provider/OPENAI/sonus.py +9 -0
- webscout/Provider/OPENAI/standardinput.py +9 -1
- webscout/Provider/OPENAI/textpollinations.py +339 -329
- webscout/Provider/OPENAI/toolbaz.py +7 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +358 -346
- webscout/Provider/OPENAI/uncovrAI.py +7 -0
- webscout/Provider/OPENAI/utils.py +103 -7
- webscout/Provider/OPENAI/venice.py +12 -0
- webscout/Provider/OPENAI/wisecat.py +19 -19
- webscout/Provider/OPENAI/writecream.py +7 -0
- webscout/Provider/OPENAI/x0gpt.py +7 -0
- webscout/Provider/OPENAI/yep.py +50 -21
- webscout/Provider/OpenGPT.py +1 -1
- webscout/Provider/TTI/AiForce/README.md +159 -0
- webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
- webscout/Provider/TTI/ImgSys/README.md +174 -0
- webscout/Provider/TTI/MagicStudio/README.md +101 -0
- webscout/Provider/TTI/Nexra/README.md +155 -0
- webscout/Provider/TTI/PollinationsAI/README.md +146 -0
- webscout/Provider/TTI/README.md +128 -0
- webscout/Provider/TTI/aiarta/README.md +134 -0
- webscout/Provider/TTI/artbit/README.md +100 -0
- webscout/Provider/TTI/fastflux/README.md +129 -0
- webscout/Provider/TTI/huggingface/README.md +114 -0
- webscout/Provider/TTI/piclumen/README.md +161 -0
- webscout/Provider/TTI/pixelmuse/README.md +79 -0
- webscout/Provider/TTI/talkai/README.md +139 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/speechma.py +500 -100
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TeachAnything.py +3 -7
- webscout/Provider/TextPollinationsAI.py +4 -2
- webscout/Provider/{aimathgpt.py → UNFINISHED/ChatHub.py} +88 -68
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/oivscode.py +351 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Writecream.py +11 -2
- webscout/Provider/__init__.py +8 -14
- webscout/Provider/ai4chat.py +4 -58
- webscout/Provider/asksteve.py +17 -9
- webscout/Provider/cerebras.py +3 -1
- webscout/Provider/koala.py +170 -268
- webscout/Provider/llmchat.py +3 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +7 -4
- webscout/Provider/samurai.py +223 -0
- webscout/Provider/scira_chat.py +4 -2
- webscout/Provider/typefully.py +23 -151
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/scout/README.md +402 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +134 -54
- webscout/zeroart/base.py +19 -13
- webscout/zeroart/effects.py +101 -99
- webscout/zeroart/fonts.py +1239 -816
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/METADATA +116 -74
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/RECORD +130 -103
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
- webscout-8.2.8.dist-info/entry_points.txt +3 -0
- webscout-8.2.8.dist-info/top_level.txt +1 -0
- webscout/Provider/AISEARCH/ISou.py +0 -256
- webscout/Provider/ElectronHub.py +0 -773
- webscout/Provider/Free2GPT.py +0 -241
- webscout/Provider/GPTWeb.py +0 -249
- webscout/Provider/bagoodex.py +0 -145
- webscout/Provider/geminiprorealtime.py +0 -160
- webscout/scout/core.py +0 -881
- webscout-8.2.6.dist-info/entry_points.txt +0 -3
- webscout-8.2.6.dist-info/top_level.txt +0 -2
- webstoken/__init__.py +0 -30
- webstoken/classifier.py +0 -189
- webstoken/keywords.py +0 -216
- webstoken/language.py +0 -128
- webstoken/ner.py +0 -164
- webstoken/normalizer.py +0 -35
- webstoken/processor.py +0 -77
- webstoken/sentiment.py +0 -206
- webstoken/stemmer.py +0 -73
- webstoken/tagger.py +0 -60
- webstoken/tokenizer.py +0 -158
- /webscout/Provider/{Youchat.py → UNFINISHED/Youchat.py} +0 -0
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
webscout/Provider/OPENAI/chatgpt.py CHANGED
@@ -547,3 +547,10 @@ class ChatGPT(OpenAICompatibleProvider):
 
         # Initialize chat interface
         self.chat = Chat(self)
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
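The seven-line block above recurs throughout this release: most providers under `webscout/Provider/OPENAI/` (ChatGPTClone, ChatSandbox, DeepInfra, E2B, ExaAI, ExaChat, Flowith, and others in the file list) gain an identical `models` property. A minimal usage sketch, assuming the class is importable from its module as laid out in the file list and that the provider's constructor takes no required arguments; the call shape mirrors the OpenAI SDK's `client.models.list()`:

```python
# Sketch only: the import path is assumed from webscout/Provider/OPENAI/chatgpt.py.
from webscout.Provider.OPENAI.chatgpt import ChatGPT

client = ChatGPT()
# models is a property returning a small helper object whose .list()
# hands back the class-level AVAILABLE_MODELS list.
for model_id in client.models.list():
    print(model_id)
```

Note the design of the helper: the inner method is declared as `def list(inner_self)` so that `self` stays bound, via the closure, to the outer provider instance, letting it return `type(self).AVAILABLE_MODELS`.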
webscout/Provider/OPENAI/chatgptclone.py CHANGED
@@ -479,3 +479,10 @@ class ChatGPTClone(OpenAICompatibleProvider):
         # Default to the most capable model
         print(f"Warning: Unknown model '{model}'. Using 'gpt-4' instead.")
         return "gpt-4"
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
webscout/Provider/OPENAI/chatsandbox.py ADDED
@@ -0,0 +1,172 @@
+from typing import List, Dict, Optional, Union, Generator, Any
+import time
+import json
+from webscout.litagent import LitAgent
+from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from .utils import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    Choice,
+    ChatCompletionMessage,
+    ChoiceDelta,
+    CompletionUsage,
+    format_prompt
+)
+import requests
+
+# ANSI escape codes for formatting
+BOLD = "\033[1m"
+RED = "\033[91m"
+RESET = "\033[0m"
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'ChatSandbox'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = None,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        OpenAI-compatible chat/completions endpoint for ChatSandbox.
+        """
+        # Use model name conversion for compatibility
+        model = self._client.convert_model_name(model)
+        # Compose the conversation prompt using format_prompt
+        question = format_prompt(messages, add_special_tokens=True)
+        payload = {
+            "messages": [question],
+            "character": model
+        }
+        request_id = f"chatcmpl-{int(time.time() * 1000)}"
+        created_time = int(time.time())
+        url = "https://chatsandbox.com/api/chat"
+        agent = LitAgent()
+        headers = {
+            'authority': 'chatsandbox.com',
+            'accept': '*/*',
+            'accept-encoding': 'gzip, deflate, br',
+            'accept-language': 'en-US,en;q=0.9',
+            'content-type': 'application/json',
+            'origin': 'https://chatsandbox.com',
+            'referer': f'https://chatsandbox.com/chat/{model}',
+            'sec-ch-ua': '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"Windows"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': agent.random(),
+            'dnt': '1',
+            'sec-gpc': '1',
+        }
+        session = requests.Session()
+        session.headers.update(headers)
+        def for_stream():
+            try:
+                response = session.post(
+                    url,
+                    json=payload,
+                    stream=True,
+                    timeout=30
+                )
+                response.raise_for_status()
+                streaming_text = ""
+                for chunk in response.iter_content(chunk_size=None):
+                    if not chunk:
+                        continue
+                    text = chunk.decode('utf-8', errors='replace')
+                    try:
+                        data = json.loads(text)
+                        content = data.get("reasoning_content", text)
+                    except Exception:
+                        content = text
+                    streaming_text += content
+                    delta = ChoiceDelta(content=content)
+                    choice = Choice(index=0, delta=delta, finish_reason=None)
+                    chunk_obj = ChatCompletionChunk(
+                        id=request_id,
+                        choices=[choice],
+                        created=created_time,
+                        model=model,
+                    )
+                    yield chunk_obj
+                # Final chunk
+                delta = ChoiceDelta(content=None)
+                choice = Choice(index=0, delta=delta, finish_reason="stop")
+                chunk_obj = ChatCompletionChunk(
+                    id=request_id,
+                    choices=[choice],
+                    created=created_time,
+                    model=model,
+                )
+                yield chunk_obj
+            except Exception as e:
+                raise RuntimeError(f"ChatSandbox streaming request failed: {e}")
+        def for_non_stream():
+            try:
+                response = session.post(
+                    url,
+                    json=payload,
+                    timeout=30
+                )
+                response.raise_for_status()
+                text = response.text
+                try:
+                    data = json.loads(text)
+                    content = data.get("reasoning_content", text)
+                except Exception:
+                    content = text
+                prompt_tokens = len(question) // 4
+                completion_tokens = len(content) // 4
+                total_tokens = prompt_tokens + completion_tokens
+                usage = CompletionUsage(
+                    prompt_tokens=prompt_tokens,
+                    completion_tokens=completion_tokens,
+                    total_tokens=total_tokens
+                )
+                message = ChatCompletionMessage(role="assistant", content=content)
+                choice = Choice(index=0, message=message, finish_reason="stop")
+                completion = ChatCompletion(
+                    id=request_id,
+                    choices=[choice],
+                    created=created_time,
+                    model=model,
+                    usage=usage,
+                )
+                return completion
+            except Exception as e:
+                raise RuntimeError(f"ChatSandbox request failed: {e}")
+        return for_stream() if stream else for_non_stream()
+
+class Chat(BaseChat):
+    def __init__(self, client: 'ChatSandbox'):
+        self.completions = Completions(client)
+
+class ChatSandbox(OpenAICompatibleProvider):
+    AVAILABLE_MODELS = ["openai", "deepseek", "llama", "gemini", "mistral-large"]
+    chat: Chat
+    def __init__(self):
+        self.chat = Chat(self)
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
+    def convert_model_name(self, model: str) -> str:
+        if model in self.AVAILABLE_MODELS:
+            return model
+        for available_model in self.AVAILABLE_MODELS:
+            if model.lower() in available_model.lower():
+                return available_model
+        # Default to openai if no match
+        print(f"{RED}{BOLD}Warning: Model '{model}' not found, using default model 'openai'{RESET}")
+        return "openai"
webscout/Provider/OPENAI/deepinfra.py CHANGED
@@ -221,6 +221,7 @@ class Chat(BaseChat):
         self.completions = Completions(client)
 
 class DeepInfra(OpenAICompatibleProvider):
+
     AVAILABLE_MODELS = [
         # "anthropic/claude-3-7-sonnet-latest", # >>>> NOT WORKING
 
@@ -229,12 +230,32 @@ class DeepInfra(OpenAICompatibleProvider):
         "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
         "deepseek-ai/DeepSeek-R1-Turbo",
         "deepseek-ai/DeepSeek-V3",
-
+        "deepseek-ai/DeepSeek-Prover-V2-671B",
         "google/gemma-2-27b-it",
         "google/gemma-2-9b-it",
-        "google/gemma-3-27b-it",
         "google/gemma-3-12b-it",
+        "google/gemma-3-27b-it",
         "google/gemma-3-4b-it",
+        "meta-llama/Llama-3.3-70B-Instruct",
+        "meta-llama/Llama-3.3-70B-Instruct-Turbo",
+        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+        "meta-llama/Llama-4-Scout-17B-16E-Instruct",
+        "meta-llama/Llama-Guard-4-12B",
+        "meta-llama/Meta-Llama-3.1-8B-Instruct",
+        "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
+        "microsoft/Phi-4-multimodal-instruct",
+        "microsoft/WizardLM-2-8x22B",
+        "microsoft/phi-4",
+        "microsoft/phi-4-reasoning-plus",
+        "mistralai/Mistral-Small-24B-Instruct-2501",
+        "nvidia/Llama-3.1-Nemotron-70B-Instruct",
+        "Qwen/QwQ-32B",
+        "Qwen/Qwen2.5-72B-Instruct",
+        "Qwen/Qwen2.5-Coder-32B-Instruct",
+        "Qwen/Qwen3-14B",
+        "Qwen/Qwen3-30B-A3B",
+        "Qwen/Qwen3-32B",
+        "Qwen/Qwen3-235B-A22B",
         # "google/gemini-1.5-flash", # >>>> NOT WORKING
         # "google/gemini-1.5-flash-8b", # >>>> NOT WORKING
         # "google/gemini-2.0-flash-001", # >>>> NOT WORKING
@@ -243,37 +264,19 @@ class DeepInfra(OpenAICompatibleProvider):
 
         # "meta-llama/Llama-3.2-1B-Instruct", # >>>> NOT WORKING
         # "meta-llama/Llama-3.2-3B-Instruct", # >>>> NOT WORKING
-        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
-        "meta-llama/Llama-4-Scout-17B-16E-Instruct",
         # "meta-llama/Llama-3.2-90B-Vision-Instruct", # >>>> NOT WORKING
         # "meta-llama/Llama-3.2-11B-Vision-Instruct", # >>>> NOT WORKING
-        "meta-llama/Llama-3.3-70B-Instruct",
-        "meta-llama/Llama-3.3-70B-Instruct-Turbo",
         # "meta-llama/Meta-Llama-3-70B-Instruct", # >>>> NOT WORKING
         # "meta-llama/Meta-Llama-3-8B-Instruct", # >>>> NOT WORKING
         # "meta-llama/Meta-Llama-3.1-70B-Instruct", # >>>> NOT WORKING
         # "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", # >>>> NOT WORKING
-        "meta-llama/Meta-Llama-3.1-8B-Instruct",
-        "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
         # "meta-llama/Meta-Llama-3.1-405B-Instruct", # >>>> NOT WORKING
-
-        "microsoft/phi-4",
-        "microsoft/Phi-4-multimodal-instruct",
-        "microsoft/WizardLM-2-8x22B",
         # "mistralai/Mixtral-8x7B-Instruct-v0.1", # >>>> NOT WORKING
         # "mistralai/Mistral-7B-Instruct-v0.3", # >>>> NOT WORKING
         # "mistralai/Mistral-Nemo-Instruct-2407", # >>>> NOT WORKING
-        "mistralai/Mistral-Small-24B-Instruct-2501",
-        "nvidia/Llama-3.1-Nemotron-70B-Instruct",
         # "NousResearch/Hermes-3-Llama-3.1-405B", # >>>> NOT WORKING
         # "NovaSky-AI/Sky-T1-32B-Preview", # >>>> NOT WORKING
-        "Qwen/QwQ-32B",
         # "Qwen/Qwen2.5-7B-Instruct", # >>>> NOT WORKING
-        "Qwen/Qwen2.5-72B-Instruct",
-        "Qwen/Qwen2.5-Coder-32B-Instruct",
-        "Qwen/Qwen3-14B",
-        "Qwen/Qwen3-30B-A3B",
-        "Qwen/Qwen3-32B",
         # "Sao10K/L3.1-70B-Euryale-v2.2", # >>>> NOT WORKING
         # "Sao10K/L3.3-70B-Euryale-v2.3", # >>>> NOT WORKING
     ]
@@ -307,3 +310,10 @@ class DeepInfra(OpenAICompatibleProvider):
         }
         self.session.headers.update(self.headers)
         self.chat = Chat(self)
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
webscout/Provider/OPENAI/e2b.py CHANGED
@@ -1158,6 +1158,12 @@ class E2B(OpenAICompatibleProvider):
         # Initialize the chat interface
         self.chat = Chat(self)
 
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
     def convert_model_name(self, model: str) -> str:
         """Normalize and validate model name."""
         normalized_model = self.MODEL_NAME_NORMALIZATION.get(model, model)
webscout/Provider/OPENAI/exaai.py CHANGED
@@ -402,3 +402,10 @@ class ExaAI(OpenAICompatibleProvider):
         # ExaAI only supports O3-Mini, regardless of the input model
         print(f"Note: ExaAI only supports O3-Mini model. Ignoring provided model '{model}'.")
         return "O3-Mini"
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
webscout/Provider/OPENAI/exachat.py CHANGED
@@ -365,6 +365,12 @@ class ExaChat(OpenAICompatibleProvider):
         # Initialize the chat interface
         self.chat = Chat(self)
 
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
     def _get_endpoint(self, provider: str) -> str:
         """Get the API endpoint for the specified provider."""
         return MODEL_CONFIGS[provider]["endpoint"]
webscout/Provider/OPENAI/flowith.py ADDED
@@ -0,0 +1,162 @@
+from typing import List, Dict, Optional, Union, Generator, Any
+import time
+import json
+
+# Import base classes and utility structures
+from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from .utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage
+)
+
+# Import requests for HTTP requests (instead of curl_cffi)
+import requests
+import zstandard as zstd
+import uuid
+
+# Attempt to import LitAgent, fallback if not available
+try:
+    from webscout.litagent import LitAgent
+except ImportError:
+    class LitAgent:
+        def generate_fingerprint(self, browser):
+            return {"user_agent": "Mozilla/5.0"}
+
+# --- Flowith OpenAI-Compatible Client ---
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'Flowith'):
+        self.client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = 2048,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Implements OpenAI-compatible chat/completions endpoint for Flowith.
+        """
+        url = "https://edge.flowith.net/ai/chat?mode=general"
+        agent = LitAgent()
+        fingerprint = agent.generate_fingerprint("chrome")
+        headers = {
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "content-type": "application/json",
+            "origin": "https://flowith.io",
+            "referer": "https://edge.flowith.net/",
+            "user-agent": fingerprint["user_agent"],
+            "dnt": "1",
+            "sec-gpc": "1"
+        }
+        session = requests.Session()
+        session.headers.update(headers)
+        node_id = str(uuid.uuid4())
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+        payload = {
+            "model": model,
+            "messages": messages,
+            "stream": stream,
+            "nodeId": node_id
+        }
+        payload.update(kwargs)
+
+        def for_stream():
+            try:
+                print(f"[DEBUG] Sending streaming request to {url} with payload: {payload}")
+                response = session.post(
+                    url,
+                    json=payload,
+                    stream=True,
+                    timeout=30
+                )
+                print(f"[DEBUG] Response status: {response.status_code}")
+                response.raise_for_status()
+                for chunk in response.iter_content(chunk_size=4096):
+                    if not chunk:
+                        break
+                    text = chunk.decode('utf-8', errors='replace')
+                    print(f"[DEBUG] Stream chunk: {repr(text)}")
+                    delta = ChoiceDelta(content=text, role="assistant")
+                    choice = Choice(index=0, delta=delta)
+                    chunk_obj = ChatCompletionChunk(
+                        id=request_id,
+                        choices=[choice],
+                        created=created_time,
+                        model=model,
+                        system_fingerprint=None
+                    )
+                    yield chunk_obj
+            except Exception as e:
+                print(f"[DEBUG] Streaming error: {e}")
+                raise RuntimeError(f"Flowith streaming request failed: {e}")
+
+        def for_non_stream():
+            try:
+                print(f"[DEBUG] Sending non-stream request to {url} with payload: {payload}")
+                response = session.post(
+                    url,
+                    json=payload,
+                    timeout=30
+                )
+                print(f"[DEBUG] Response status: {response.status_code}")
+                response.raise_for_status()
+                encoding = response.headers.get('Content-Encoding', '').lower()
+                print(f"[DEBUG] Response encoding: {encoding}")
+                if encoding == 'zstd':
+                    dctx = zstd.ZstdDecompressor()
+                    with dctx.stream_reader(response.raw) as reader:
+                        decompressed = reader.read()
+                    text = decompressed.decode('utf-8', errors='replace')
+                else:
+                    text = response.text
+                print(f"[DEBUG] Raw response text: {repr(text)}")
+                # Flowith returns raw text, not JSON
+                content = text.strip()
+                print(f"[DEBUG] Final content for ChatCompletion: {repr(content)}")
+                message = ChatCompletionMessage(role="assistant", content=content)
+                choice = Choice(index=0, message=message, finish_reason="stop")
+                usage = CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0)
+                completion = ChatCompletion(
+                    id=request_id,
+                    choices=[choice],
+                    created=created_time,
+                    model=model,
+                    usage=usage
+                )
+                print(f"[DEBUG] Returning ChatCompletion: {completion}")
+                return completion
+            except Exception as e:
+                print(f"[DEBUG] Non-streaming error: {e}")
+                raise RuntimeError(f"Flowith request failed: {e}")
+
+        return for_stream() if stream else for_non_stream()
+
+class Chat(BaseChat):
+    def __init__(self, client: 'Flowith'):
+        self.completions = Completions(client)
+
+class Flowith(OpenAICompatibleProvider):
+    AVAILABLE_MODELS = [
+        "gpt-4.1-mini", "deepseek-chat", "deepseek-reasoner", "claude-3.5-haiku",
+        "gemini-2.0-flash", "gemini-2.5-flash", "grok-3-mini"
+    ]
+
+    chat: Chat
+    def __init__(self):
+        self.chat = Chat(self)
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()