webscout 8.3.4__py3-none-any.whl → 8.3.6__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registries, and is provided for informational purposes only.
Potentially problematic release: this version of webscout has been flagged as possibly problematic by the registry.
- webscout/AIutel.py +52 -1016
- webscout/Bard.py +12 -6
- webscout/DWEBS.py +66 -57
- webscout/Provider/AISEARCH/PERPLEXED_search.py +214 -0
- webscout/Provider/AISEARCH/__init__.py +11 -10
- webscout/Provider/AISEARCH/felo_search.py +7 -3
- webscout/Provider/AISEARCH/scira_search.py +2 -0
- webscout/Provider/AISEARCH/stellar_search.py +53 -8
- webscout/Provider/Deepinfra.py +13 -1
- webscout/Provider/Flowith.py +6 -1
- webscout/Provider/GithubChat.py +1 -0
- webscout/Provider/GptOss.py +207 -0
- webscout/Provider/Kimi.py +445 -0
- webscout/Provider/Netwrck.py +3 -6
- webscout/Provider/OPENAI/README.md +2 -1
- webscout/Provider/OPENAI/TogetherAI.py +12 -8
- webscout/Provider/OPENAI/TwoAI.py +94 -1
- webscout/Provider/OPENAI/__init__.py +4 -4
- webscout/Provider/OPENAI/copilot.py +20 -4
- webscout/Provider/OPENAI/deepinfra.py +12 -0
- webscout/Provider/OPENAI/e2b.py +60 -8
- webscout/Provider/OPENAI/flowith.py +4 -3
- webscout/Provider/OPENAI/generate_api_key.py +48 -0
- webscout/Provider/OPENAI/gptoss.py +288 -0
- webscout/Provider/OPENAI/kimi.py +469 -0
- webscout/Provider/OPENAI/netwrck.py +8 -12
- webscout/Provider/OPENAI/refact.py +274 -0
- webscout/Provider/OPENAI/scirachat.py +4 -0
- webscout/Provider/OPENAI/textpollinations.py +11 -10
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/OPENAI/venice.py +1 -0
- webscout/Provider/Perplexitylabs.py +163 -147
- webscout/Provider/Qodo.py +30 -6
- webscout/Provider/TTI/__init__.py +1 -0
- webscout/Provider/TTI/bing.py +14 -2
- webscout/Provider/TTI/together.py +11 -9
- webscout/Provider/TTI/venice.py +368 -0
- webscout/Provider/TTS/README.md +0 -1
- webscout/Provider/TTS/__init__.py +0 -1
- webscout/Provider/TTS/base.py +479 -159
- webscout/Provider/TTS/deepgram.py +409 -156
- webscout/Provider/TTS/elevenlabs.py +425 -111
- webscout/Provider/TTS/freetts.py +317 -140
- webscout/Provider/TTS/gesserit.py +192 -128
- webscout/Provider/TTS/murfai.py +248 -113
- webscout/Provider/TTS/openai_fm.py +347 -129
- webscout/Provider/TTS/speechma.py +620 -586
- webscout/Provider/TextPollinationsAI.py +11 -10
- webscout/Provider/TogetherAI.py +12 -4
- webscout/Provider/TwoAI.py +96 -2
- webscout/Provider/TypliAI.py +33 -27
- webscout/Provider/UNFINISHED/VercelAIGateway.py +339 -0
- webscout/Provider/UNFINISHED/fetch_together_models.py +6 -11
- webscout/Provider/Venice.py +1 -0
- webscout/Provider/WiseCat.py +18 -20
- webscout/Provider/__init__.py +2 -96
- webscout/Provider/cerebras.py +83 -33
- webscout/Provider/copilot.py +42 -23
- webscout/Provider/scira_chat.py +4 -0
- webscout/Provider/toolbaz.py +6 -10
- webscout/Provider/typefully.py +1 -11
- webscout/__init__.py +3 -15
- webscout/auth/__init__.py +19 -4
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/auth_system.py +25 -40
- webscout/auth/config.py +105 -6
- webscout/auth/database.py +377 -22
- webscout/auth/models.py +185 -130
- webscout/auth/request_processing.py +175 -11
- webscout/auth/routes.py +99 -2
- webscout/auth/server.py +9 -2
- webscout/auth/simple_logger.py +236 -0
- webscout/conversation.py +22 -20
- webscout/sanitize.py +1078 -0
- webscout/scout/README.md +20 -23
- webscout/scout/core/crawler.py +125 -38
- webscout/scout/core/scout.py +26 -5
- webscout/version.py +1 -1
- webscout/webscout_search.py +13 -6
- webscout/webscout_search_async.py +10 -8
- webscout/yep_search.py +13 -5
- {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/METADATA +10 -149
- {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/RECORD +88 -87
- webscout/Provider/Glider.py +0 -225
- webscout/Provider/OPENAI/README_AUTOPROXY.md +0 -238
- webscout/Provider/OPENAI/c4ai.py +0 -394
- webscout/Provider/OPENAI/glider.py +0 -330
- webscout/Provider/OPENAI/typegpt.py +0 -368
- webscout/Provider/OPENAI/uncovrAI.py +0 -477
- webscout/Provider/TTS/sthir.py +0 -94
- webscout/Provider/WritingMate.py +0 -273
- webscout/Provider/typegpt.py +0 -284
- webscout/Provider/uncovr.py +0 -333
- /webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +0 -0
- {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/WHEEL +0 -0
- {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/__init__.py
CHANGED
@@ -1,12 +1,11 @@
 # This file marks the directory as a Python package.
 from .deepinfra import *
-
+## glider import removed
 from .chatgptclone import *
 from .x0gpt import *
 from .wisecat import *
 from .venice import *
 from .exaai import *
-from .typegpt import *
 from .scirachat import *
 from .llmchatco import *
 from .yep import * # Add YEPCHAT
@@ -17,7 +16,6 @@ from .netwrck import *
 from .standardinput import *
 from .writecream import *
 from .toolbaz import *
-from .uncovrAI import *
 from .opkfc import *
 from .chatgpt import *
 from .textpollinations import *
@@ -28,7 +26,6 @@ from .ai4chat import * # Add AI4Chat
 from .mcpcore import *
 from .flowith import *
 from .chatsandbox import *
-from .c4ai import *
 from .flowith import *
 from .Cloudflare import *
 from .NEMOTRON import *
@@ -46,6 +43,9 @@ from .friendli import *
 from .monochat import *
 from .MiniMax import * # Add MiniMaxAI provider
 from .qodo import * # Add QodoAI provider
+from .kimi import * # Add Kimi provider
+from .gptoss import * # Add GPT-OSS provider
+from .refact import * # Add Refact provider
 # Export auto-proxy functionality
 from .autoproxy import (
     get_auto_proxy,
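Net effect of the __init__.py changes: the glider, typegpt, uncovrAI, and c4ai wildcard imports are gone (those provider modules were removed from the wheel, per the file list above), while kimi, gptoss, and refact are newly re-exported. A quick sanity-check sketch, assuming each module re-exports a provider class named after it (GptOss is confirmed by the gptoss.py source further down; Glider is inferred from the module name):

# Import-check sketch for the 8.3.6 re-exports (class names partly assumed).
from webscout.Provider.OPENAI import GptOss  # new in 8.3.6

try:
    from webscout.Provider.OPENAI import Glider  # removed in 8.3.6; name assumed
except ImportError:
    print("Glider provider no longer ships with the package")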
webscout/Provider/OPENAI/copilot.py
CHANGED
@@ -87,12 +87,22 @@ class Completions(BaseCompletions):
         images.append({"type": "image", "url": r.json().get("url")})

         ws = s.ws_connect(self._client.websocket_url)
-
+        # Map alias to real model name if needed
+        real_model = Copilot.MODEL_ALIASES.get(model, model)
+        if real_model not in Copilot.AVAILABLE_MODELS:
+            raise RuntimeError(f"Invalid model: {model}. Choose from: {Copilot.AVAILABLE_MODELS}")
+        if real_model == "Smart":
+            mode = "smart"
+        elif "Think" in real_model:
+            mode = "reasoning"
+        else:
+            mode = "chat"
         ws.send(json.dumps({
             "event": "send",
             "conversationId": conv_id,
             "content": images + [{"type": "text", "text": prompt_text}],
-            "mode": mode
+            "mode": mode,
+            "model": real_model
         }).encode(), CurlWsFlag.TEXT)

         prompt_tokens = count_tokens(prompt_text)
@@ -281,8 +291,14 @@ class Copilot(OpenAICompatibleProvider):
     url = "https://copilot.microsoft.com"
     conversation_url = f"{url}/c/api/conversations"
     websocket_url = "wss://copilot.microsoft.com/c/api/chat?api-version=2"
-
-    AVAILABLE_MODELS = ["Copilot", "Think Deeper"]
+
+    AVAILABLE_MODELS = ["Copilot", "Think Deeper", "Smart"]
+    MODEL_ALIASES = {
+        "gpt-4o": "Copilot",
+        "o4-mini": "Think Deeper",
+        "gpt-5": "Smart",
+
+    }

     def __init__(self, browser: str = "chrome", tools: Optional[List] = None, **kwargs):
         self.timeout = 900
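The new MODEL_ALIASES table pairs with the mode-selection logic added in the first copilot.py hunk: an OpenAI-style name is first mapped to a Copilot-native model, and the resolved model then determines the websocket "mode" field. A standalone sketch of that resolution, lifted directly from the diff:

# Alias and mode resolution as added in copilot.py (standalone sketch).
MODEL_ALIASES = {"gpt-4o": "Copilot", "o4-mini": "Think Deeper", "gpt-5": "Smart"}
AVAILABLE_MODELS = ["Copilot", "Think Deeper", "Smart"]

def resolve(model):
    # Map alias to real model name if needed
    real_model = MODEL_ALIASES.get(model, model)
    if real_model not in AVAILABLE_MODELS:
        raise RuntimeError(f"Invalid model: {model}. Choose from: {AVAILABLE_MODELS}")
    if real_model == "Smart":
        mode = "smart"
    elif "Think" in real_model:
        mode = "reasoning"
    else:
        mode = "chat"
    return real_model, mode

print(resolve("gpt-5"))    # ('Smart', 'smart')
print(resolve("o4-mini"))  # ('Think Deeper', 'reasoning')
print(resolve("Copilot"))  # ('Copilot', 'chat')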
webscout/Provider/OPENAI/deepinfra.py
CHANGED
@@ -196,8 +196,13 @@ class Chat(BaseChat):
 class DeepInfra(OpenAICompatibleProvider):
     AVAILABLE_MODELS = [
         "anthropic/claude-4-opus",
+        "moonshotai/Kimi-K2-Instruct",
         "anthropic/claude-4-sonnet",
         "deepseek-ai/DeepSeek-R1-0528-Turbo",
+        "Qwen/Qwen3-235B-A22B-Thinking-2507",
+        "Qwen/Qwen3-Coder-480B-A35B-Instruct",
+        "Qwen/Qwen3-Coder-480B-A35B-Instruct-Turbo",
+        "Qwen/Qwen3-235B-A22B-Instruct-2507",
         "Qwen/Qwen3-235B-A22B",
         "Qwen/Qwen3-30B-A3B",
         "Qwen/Qwen3-32B",
@@ -249,12 +254,19 @@ class DeepInfra(OpenAICompatibleProvider):
         "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
         "microsoft/WizardLM-2-8x22B",
         "mistralai/Devstral-Small-2505",
+        "mistralai/Devstral-Small-2507",
         "mistralai/Mistral-7B-Instruct-v0.3",
         "mistralai/Mistral-Nemo-Instruct-2407",
         "mistralai/Mistral-Small-24B-Instruct-2501",
         "mistralai/Mistral-Small-3.2-24B-Instruct-2506",
         "mistralai/Mixtral-8x7B-Instruct-v0.1",
         "nvidia/Llama-3.1-Nemotron-70B-Instruct",
+        "zai-org/GLM-4.5-Air",
+        "zai-org/GLM-4.5",
+        "zai-org/GLM-4.5V",
+        "openai/gpt-oss-120b",
+        "openai/gpt-oss-20b",
+        "allenai/olmOCR-7B-0725-FP8",
     ]
     def __init__(self, browser: str = "chrome", api_key: str = None):
         self.timeout = None
webscout/Provider/OPENAI/e2b.py
CHANGED
@@ -114,6 +114,35 @@ MODEL_PROMPT = {
             }
         }
     },
+    "claude-opus-4-1-20250805": {
+        "apiUrl": "https://fragments.e2b.dev/api/chat",
+        "id": "claude-opus-4-1-20250805",
+        "name": "Claude Opus 4.1",
+        "Knowledge": "2024-10",
+        "provider": "Anthropic",
+        "providerId": "anthropic",
+        "multiModal": True,
+        "templates": {
+            "system": {
+                "intro": "You are Claude Opus 4.1, Anthropic's most capable AI assistant for complex reasoning and analysis. You excel at sophisticated problem-solving, creative thinking, and providing nuanced insights across a wide range of domains. You can analyze images, code, and complex data to deliver comprehensive and thoughtful responses.",
+                "principles": ["honesty", "ethics", "diligence", "helpfulness", "accuracy", "thoughtfulness", "creativity"],
+                "latex": {
+                    "inline": "\\(\\nabla \\cdot \\vec{E} = \\frac{\\rho}{\\epsilon_0}\\)",
+                    "block": "\\begin{align}\n\\nabla \\cdot \\vec{E} &= \\frac{\\rho}{\\epsilon_0} \\\\\n\\nabla \\times \\vec{B} &= \\mu_0\\vec{J} + \\mu_0\\epsilon_0\\frac{\\partial\\vec{E}}{\\partial t} \\\\\nE &= mc^2 \\\\\n\\psi(x,t) &= Ae^{i(kx-\\omega t)}\n\\end{align}"
+                }
+            }
+        },
+        "requestConfig": {
+            "template": {
+                "txt": {
+                    "name": "chat with users and start role-playing, Above of all: Follow the latest news from users",
+                    "lib": [""],
+                    "file": "pages/ChatWithUsers.txt",
+                    "port": 3000
+                }
+            }
+        }
+    },
     "o1-mini": {
         "apiUrl": "https://fragments.e2b.dev/api/chat",
         "id": "o1-mini",
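Each MODEL_PROMPT entry bundles the endpoint and prompt-template metadata for one model; the request path below reads the endpoint back out via model_config["apiUrl"]. A minimal sketch of that lookup, using a trimmed stand-in entry rather than the full table:

# Trimmed MODEL_PROMPT lookup sketch (stub entry; the real table is above).
MODEL_PROMPT = {
    "claude-opus-4-1-20250805": {
        "apiUrl": "https://fragments.e2b.dev/api/chat",
        "providerId": "anthropic",
        "multiModal": True,
    },
}

model_config = MODEL_PROMPT["claude-opus-4-1-20250805"]
url = model_config["apiUrl"]  # same access pattern as the request method below
print(url)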
@@ -1013,6 +1042,10 @@ class Completions(BaseCompletions):
         """Enhanced request method with IP rotation, session rotation, and advanced rate limit bypass."""
         url = model_config["apiUrl"]
         target_origin = "https://fragments.e2b.dev"
+
+        # Use client proxies if none provided
+        if proxies is None:
+            proxies = getattr(self._client, "proxies", None)

         for attempt in range(retries):
             try:
@@ -1055,13 +1088,13 @@

             json_data = json.dumps(enhanced_request_body)

-            # Use curl_cffi session with enhanced fingerprinting
+            # Use curl_cffi session with enhanced fingerprinting and proxy support
             response = self._client.session.post(
                 url=url,
                 headers=headers,
                 data=json_data,
                 timeout=timeout or self._client.timeout,
-                proxies=proxies
+                proxies=proxies,
                 impersonate=self._client.impersonation
             )

@@ -1225,17 +1258,21 @@ class E2B(OpenAICompatibleProvider):
         'deepseek-r1-instruct': 'deepseek-r1'
     }

-    def __init__(self, retries: int = 3):
+    def __init__(self, retries: int = 3, proxies: Optional[Dict[str, str]] = None, **kwargs):
         """
         Initialize the E2B client with curl_cffi and browser fingerprinting.

         Args:
             retries: Number of retries for failed requests.
+            proxies: Proxy configuration for requests.
+            **kwargs: Additional arguments passed to parent class.
         """
         self.timeout = 60  # Default timeout in seconds
-        self.proxies = None  # Default proxies
         self.retries = retries
-
+
+        # Handle proxy configuration
+        self.proxies = proxies or {}
+
         # Use LitAgent for user-agent
         self.headers = LitAgent().generate_fingerprint()

@@ -1243,6 +1280,20 @@
         self.impersonation = curl_requests.impersonate.DEFAULT_CHROME
         self.session = curl_requests.Session()
         self.session.headers.update(self.headers)
+
+        # Apply proxy configuration if provided
+        if self.proxies:
+            self.session.proxies.update(self.proxies)
+
+        # Initialize bypass session data
+        self._session_rotation_data = {}
+        self._last_rotation_time = 0
+        self._rotation_interval = 300  # Rotate session every 5 minutes
+        self._rate_limit_failures = 0
+        self._max_rate_limit_failures = 3
+
+        # Initialize the chat interface
+        self.chat = Chat(self)

         # Initialize bypass session data
         self._session_rotation_data = {}
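Taken together, the e2b.py hunks thread proxy support end to end: the constructor now accepts a proxies dict, the curl_cffi session adopts it, and per-request calls fall back to it whenever no explicit proxies argument is passed. A usage sketch, with a placeholder proxy address:

# Hypothetical E2B proxy usage; the proxy URL is a placeholder.
proxies = {"http": "http://127.0.0.1:8080", "https": "http://127.0.0.1:8080"}
client = E2B(retries=3, proxies=proxies)

# Omitting a per-request proxies argument makes the request method fall back
# to getattr(self._client, "proxies", None), i.e. the dict above.
response = client.chat.completions.create(
    model="claude-opus-4-1-20250805",
    messages=[{"role": "user", "content": "hi."}],
)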
@@ -1589,13 +1640,13 @@ if __name__ == "__main__":
     print("-" * 80)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
     print("-" * 80)
-    print("\n--- Streaming Simulation Test (
+    print("\n--- Streaming Simulation Test (claude-opus-4-1-20250805) ---")
     try:
         client_stream = E2B()
         stream = client_stream.chat.completions.create(
-            model="
+            model="claude-opus-4-1-20250805",
             messages=[
-                {"role": "user", "content": "
+                {"role": "user", "content": "hi."}
             ],
             stream=True
         )
@@ -1607,6 +1658,7 @@
                 print(content, end="", flush=True)
                 full_stream_response += content
             print("\n--- End of Stream ---")
+            print(client_stream.proxies)
             if not full_stream_response:
                 print(f"{RED}Stream test failed: No content received.{RESET}")
     except Exception as e:
webscout/Provider/OPENAI/flowith.py
CHANGED
@@ -150,8 +150,9 @@ class Chat(BaseChat):

 class Flowith(OpenAICompatibleProvider):
     AVAILABLE_MODELS = [
-        "gpt-
-        "
+        "gpt-5-nano", "gpt-5-mini", "glm-4.5", "gpt-oss-120b", "gpt-oss-20b", "kimi-k2",
+        "gpt-4.1", "gpt-4.1-mini", "deepseek-chat", "deepseek-reasoner",
+        "gemini-2.5-flash", "grok-3-mini"
     ]

     chat: Chat
@@ -170,7 +171,7 @@ if __name__ == "__main__":
     client = Flowith()
     messages = [{"role": "user", "content": "Hello, how are you?"}]
     response = client.chat.completions.create(
-        model="gpt-
+        model="gpt-5-nano",
         messages=messages,
         stream=True
     )
webscout/Provider/OPENAI/generate_api_key.py
ADDED
@@ -0,0 +1,48 @@
+import random
+import string
+
+def generate_api_key_suffix(length: int = 4) -> str:
+    """Generate a random API key suffix like 'C1Z5'
+
+    Args:
+        length: Length of the suffix (default: 4)
+
+    Returns:
+        A random string with uppercase letters and digits
+    """
+    # Use uppercase letters and digits for the suffix
+    chars = string.ascii_uppercase + string.digits
+    return ''.join(random.choice(chars) for _ in range(length))
+
+def generate_full_api_key(prefix: str = "EU1CW20nX5oau42xBSgm") -> str:
+    """Generate a full API key with the given prefix pattern
+
+    Args:
+        prefix: The base prefix to use (default uses the pattern from the example)
+
+    Returns:
+        A full API key string with a random suffix like 'C1Z5'
+    """
+    # Generate the suffix (last 4 characters like C1Z5)
+    suffix = generate_api_key_suffix(4)
+
+    # Combine prefix with the generated suffix
+    return prefix + suffix
+
+if __name__ == "__main__":
+    # Example usage
+    print("Generate API key suffix (like C1Z5):")
+    for i in range(5):
+        suffix = generate_api_key_suffix()
+        print(f"  {suffix}")
+
+    print("\nGenerate full API key with prefix:")
+    for i in range(5):
+        api_key = generate_full_api_key()
+        print(f"  {api_key}")
+
+    print("\nGenerate with custom prefix:")
+    custom_prefix = "EU1CW20nX5oau42xBSgm"
+    for i in range(3):
+        api_key = generate_full_api_key(custom_prefix)
+        print(f"  {api_key}")
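Worth noting: generate_api_key.py draws from the random module, whose output is predictable; if these suffixes were ever meant to be hard to guess, the standard secrets module is the usual choice. A variant sketch, not what the package ships:

# Alternative suffix generator using the secrets module (illustrative only).
import secrets
import string

def generate_api_key_suffix_secure(length: int = 4) -> str:
    # Same uppercase-plus-digit alphabet as the shipped helper.
    chars = string.ascii_uppercase + string.digits
    return ''.join(secrets.choice(chars) for _ in range(length))

print(generate_api_key_suffix_secure())  # e.g. 'C1Z5'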
webscout/Provider/OPENAI/gptoss.py
ADDED
@@ -0,0 +1,288 @@
+import requests
+import json
+import time
+import uuid
+from typing import List, Dict, Optional, Union, Generator, Any
+
+from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from webscout.Provider.OPENAI.utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage, format_prompt, count_tokens
+)
+
+try:
+    from webscout.litagent import LitAgent
+except ImportError:
+    pass
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'GptOss'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = 600,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        # Format messages into conversation prompt
+        conversation_prompt = format_prompt(messages, add_special_tokens=False, do_continue=True)
+
+        # Count tokens for usage tracking
+        prompt_tokens = count_tokens(conversation_prompt)
+
+        payload = {
+            "op": "threads.create",
+            "params": {
+                "input": {
+                    "text": conversation_prompt,
+                    "content": [{"type": "input_text", "text": conversation_prompt}],
+                    "quoted_text": "",
+                    "attachments": []
+                }
+            }
+        }
+
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        if stream:
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies, prompt_tokens)
+        else:
+            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies, prompt_tokens)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None,
+        prompt_tokens: int = 0
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        try:
+            response = self._client.session.post(
+                self._client.base_url,
+                headers=self._client.headers,
+                json=payload,
+                stream=True,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies
+            )
+            response.raise_for_status()
+
+            completion_tokens = 0
+            total_tokens = prompt_tokens
+
+            for line in response.iter_lines(decode_unicode=True):
+                if line and line.startswith("data: "):
+                    json_str = line[6:]
+                    if json_str == "[DONE]":
+                        break
+                    try:
+                        data = json.loads(json_str)
+
+                        # Extract content from GptOss response format
+                        content = None
+                        if (data.get('type') == 'thread.item_updated' and
+                            data.get('update', {}).get('type') == 'assistant_message.content_part.text_delta'):
+                            content = data.get('update', {}).get('delta')
+
+                        if content:
+                            # Count tokens in the content chunk
+                            chunk_tokens = count_tokens(content)
+                            completion_tokens += chunk_tokens
+                            total_tokens = prompt_tokens + completion_tokens
+
+                            delta = ChoiceDelta(
+                                content=content,
+                                role="assistant"
+                            )
+                            choice = Choice(
+                                index=0,
+                                delta=delta,
+                                finish_reason=None
+                            )
+                            chunk = ChatCompletionChunk(
+                                id=request_id,
+                                choices=[choice],
+                                created=created_time,
+                                model=model
+                            )
+                            chunk.usage = {
+                                "prompt_tokens": prompt_tokens,
+                                "completion_tokens": completion_tokens,
+                                "total_tokens": total_tokens,
+                                "estimated_cost": None
+                            }
+                            yield chunk
+                    except json.JSONDecodeError:
+                        continue
+
+            # Final chunk with finish_reason="stop"
+            delta = ChoiceDelta(content=None, role=None)
+            choice = Choice(index=0, delta=delta, finish_reason="stop")
+            chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model
+            )
+            chunk.usage = {
+                "prompt_tokens": prompt_tokens,
+                "completion_tokens": completion_tokens,
+                "total_tokens": total_tokens,
+                "estimated_cost": None
+            }
+            yield chunk
+
+        except Exception as e:
+            print(f"Error during GptOss stream request: {e}")
+            raise IOError(f"GptOss request failed: {e}") from e
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None,
+        prompt_tokens: int = 0
+    ) -> ChatCompletion:
+        try:
+            response = self._client.session.post(
+                self._client.base_url,
+                headers=self._client.headers,
+                json=payload,
+                stream=True,  # GptOss API is event-stream only
+                timeout=timeout or self._client.timeout,
+                proxies=proxies
+            )
+            response.raise_for_status()
+
+            # Collect all chunks to form complete response
+            full_content = ""
+            completion_tokens = 0
+
+            for line in response.iter_lines(decode_unicode=True):
+                if line and line.startswith("data: "):
+                    json_str = line[6:]
+                    if json_str == "[DONE]":
+                        break
+                    try:
+                        data = json.loads(json_str)
+
+                        # Extract content from GptOss response format
+                        if (data.get('type') == 'thread.item_updated' and
+                            data.get('update', {}).get('type') == 'assistant_message.content_part.text_delta'):
+                            content = data.get('update', {}).get('delta')
+                            if content:
+                                full_content += content
+                    except json.JSONDecodeError:
+                        continue
+
+            # Count tokens in the complete response
+            completion_tokens = count_tokens(full_content)
+            total_tokens = prompt_tokens + completion_tokens
+
+            message = ChatCompletionMessage(
+                role="assistant",
+                content=full_content
+            )
+            choice = Choice(
+                index=0,
+                message=message,
+                finish_reason="stop"
+            )
+            usage = CompletionUsage(
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                total_tokens=total_tokens
+            )
+            completion = ChatCompletion(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                usage=usage
+            )
+            return completion
+
+        except Exception as e:
+            print(f"Error during GptOss non-stream request: {e}")
+            raise IOError(f"GptOss request failed: {e}") from e
+
+class Chat(BaseChat):
+    def __init__(self, client: 'GptOss'):
+        self.completions = Completions(client)
+
+class GptOss(OpenAICompatibleProvider):
+    AVAILABLE_MODELS = ["gpt-oss-20b", "gpt-oss-120b"]
+
+    def __init__(
+        self,
+        browser: str = "chrome",
+        api_key: str = None,
+        model: str = "gpt-oss-120b",
+        reasoning_effort: str = "high",
+        timeout: int = 30,
+        **kwargs
+    ):
+        super().__init__(api_key=api_key, **kwargs)
+        self.timeout = timeout
+        self.base_url = "https://api.gpt-oss.com/chatkit"
+        self.model = model if model in self.AVAILABLE_MODELS else self.AVAILABLE_MODELS[0]
+        self.reasoning_effort = reasoning_effort
+        self.session = requests.Session()
+
+        # Generate headers using LitAgent
+        try:
+            agent = LitAgent()
+            fingerprint = agent.generate_fingerprint(browser)
+            self.headers = {
+                "Accept": "text/event-stream",
+                "Accept-Encoding": fingerprint.get("accept_encoding", "gzip, deflate, br"),
+                "Accept-Language": fingerprint.get("accept_language", "en-US,en;q=0.9"),
+                "Content-Type": "application/json",
+                "Cache-Control": "no-cache",
+                "Connection": "keep-alive",
+                "Pragma": "no-cache",
+                "User-Agent": fingerprint.get("user_agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"),
+                "x-reasoning-effort": self.reasoning_effort,
+                "x-selected-model": self.model,
+                "x-show-reasoning": "true"
+            }
+        except:
+            # Fallback headers if LitAgent fails
+            self.headers = {
+                "Accept": "text/event-stream",
+                "Accept-Encoding": "gzip, deflate, br",
+                "Accept-Language": "en-US,en;q=0.9",
+                "Content-Type": "application/json",
+                "Cache-Control": "no-cache",
+                "Connection": "keep-alive",
+                "Pragma": "no-cache",
+                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36",
+                "x-reasoning-effort": self.reasoning_effort,
+                "x-selected-model": self.model,
+                "x-show-reasoning": "true"
+            }
+
+        self.session.headers.update(self.headers)
+        self.chat = Chat(self)
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
+
+if __name__ == "__main__":
+    client = GptOss()
+    response = client.chat.completions.create(
+        model="gpt-oss-120b",
+        messages=[{"role": "user", "content": "Hello, how are you?"}],
+        max_tokens=100,
+        stream=False
+    )
+    print(response)