webscout 2025.10.17__py3-none-any.whl → 2025.10.19__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- webscout/Provider/DeepAI.py +362 -0
- webscout/Provider/Gradient.py +231 -0
- webscout/Provider/OPENAI/DeepAI.py +300 -0
- webscout/Provider/TogetherAI.py +139 -199
- webscout/version.py +1 -1
- webscout/version.py.bak +1 -1
- {webscout-2025.10.17.dist-info → webscout-2025.10.19.dist-info}/METADATA +1 -1
- {webscout-2025.10.17.dist-info → webscout-2025.10.19.dist-info}/RECORD +12 -9
- {webscout-2025.10.17.dist-info → webscout-2025.10.19.dist-info}/WHEEL +0 -0
- {webscout-2025.10.17.dist-info → webscout-2025.10.19.dist-info}/entry_points.txt +0 -0
- {webscout-2025.10.17.dist-info → webscout-2025.10.19.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-2025.10.17.dist-info → webscout-2025.10.19.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/DeepAI.py
@@ -0,0 +1,300 @@
+"""
+DeepAI Chat Provider for webscout
+
+This provider implements the DeepAI chat API discovered through reverse engineering.
+The API uses a POST endpoint with URL-encoded form data containing the chat history and parameters.
+
+API Details:
+- Endpoint: https://api.deepai.org/hacking_is_a_serious_crime
+- Method: POST
+- Authentication: api-key header (trial key provided)
+- Content-Type: application/x-www-form-urlencoded
+- Response: plain-text AI response
+
+Features:
+- Streaming and non-streaming support
+- Conversation history management
+- Error handling
+- Configurable model and chat style
+"""
+
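For orientation, the endpoint described in the docstring can be exercised outside the provider. A minimal sketch, assuming the form fields assembled by Completions.create further down and a placeholder api-key (the actual trial key ships in the file below):

    import json
    import requests

    # Standalone request mirroring what this provider sends.
    resp = requests.post(
        "https://api.deepai.org/hacking_is_a_serious_crime",
        headers={"api-key": "tryit-..."},  # placeholder; real trial key appears below
        data={
            "chat_style": "chat",
            "chatHistory": json.dumps([{"role": "user", "content": "Hi"}]),
            "model": "standard",
            "hacker_is_stinky": "very_stinky",
            "enabled_tools": json.dumps(["image_generator"]),
        },
        timeout=30,
    )
    print(resp.status_code, resp.text[:200])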
+from typing import List, Dict, Optional, Union, Generator, Any
+
+# Import requests for HTTP requests
+import requests
+
+# Standard library imports
+import json
+import time
+import uuid
+
+# Import base classes and utility structures
+from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from .utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage
+)
+
+# Attempt to import LitAgent, fallback if not available
+try:
+    from webscout.litagent import LitAgent
+except ImportError:
+    LitAgent = None
+
+# --- DeepAI Client ---
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'DeepAI'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = 2049,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        tools: Optional[List[Union[Dict[str, Any], Any]]] = None,
+        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation.
+        Mimics openai.chat.completions.create.
+        """
+        payload = {
+            "chat_style": self._client.chat_style,
+            "chatHistory": json.dumps(messages),
+            "model": model,
+            "hacker_is_stinky": "very_stinky",
+            "enabled_tools": json.dumps(self._client.enabled_tools)
+        }
+
+        # Add optional parameters if provided
+        if max_tokens is not None and max_tokens > 0:
+            payload["max_tokens"] = max_tokens
+
+        if temperature is not None:
+            payload["temperature"] = temperature
+
+        if top_p is not None:
+            payload["top_p"] = top_p
+
+        # Add any additional parameters
+        payload.update(kwargs)
+
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        if stream:
+            return self._create_stream(request_id, created_time, model, payload)
+        else:
+            return self._create_non_stream(request_id, created_time, model, payload)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        # DeepAI doesn't actually support streaming, but we implement the
+        # interface for compatibility: yield the full response as a single chunk.
+        try:
+            response = self._client.session.post(
+                "https://api.deepai.org/hacking_is_a_serious_crime",
+                data=payload,
+                timeout=self._client.timeout,
+                # Note: plain requests.Session.post does not accept an
+                # "impersonate" kwarg (that is a curl_cffi feature), so none
+                # is passed here.
+            )
+
+            if response.status_code != 200:
+                raise IOError(f"DeepAI request failed with status code {response.status_code}: {response.text}")
+
+            # Get response text
+            content = response.text.strip()
+
+            # Create the delta object
+            delta = ChoiceDelta(
+                content=content,
+                role="assistant",
+                tool_calls=None
+            )
+
+            # Create the choice object
+            choice = Choice(
+                index=0,
+                delta=delta,
+                finish_reason="stop",
+                logprobs=None
+            )
+
+            # Create the chunk object
+            chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                system_fingerprint=None
+            )
+
+            # Set usage directly on the chunk object (character-count
+            # estimate, not a true token count)
+            chunk.usage = {
+                "prompt_tokens": len(json.dumps(payload.get("chatHistory", []))),
+                "completion_tokens": len(content),
+                "total_tokens": len(json.dumps(payload.get("chatHistory", []))) + len(content),
+                "estimated_cost": None
+            }
+
+            yield chunk
+
+        except Exception as e:
+            print(f"Error during DeepAI stream request: {e}")
+            raise IOError(f"DeepAI request failed: {e}") from e
+
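Since the endpoint returns the complete reply at once, the generator above yields exactly one chunk; callers can still iterate it with the usual OpenAI-style streaming loop. A sketch, assuming a DeepAI client as defined below:

    client = DeepAI()
    for chunk in client.chat.completions.create(
        model="standard",
        messages=[{"role": "user", "content": "Hello!"}],
        stream=True,
    ):
        # Only one chunk is ever produced; delta.content holds the full text.
        if chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content)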
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+    ) -> ChatCompletion:
+        try:
+            response = self._client.session.post(
+                "https://api.deepai.org/hacking_is_a_serious_crime",
+                data=payload,
+                timeout=self._client.timeout,
+                # Note: as above, no "impersonate" kwarg; plain requests is used.
+            )
+
+            if response.status_code != 200:
+                raise IOError(f"DeepAI request failed with status code {response.status_code}: {response.text}")
+
+            # Get response text
+            content = response.text.strip()
+
+            # Create the message object
+            message = ChatCompletionMessage(
+                role="assistant",
+                content=content
+            )
+
+            # Create the choice object
+            choice = Choice(
+                index=0,
+                message=message,
+                finish_reason="stop"
+            )
+
+            # Create the usage object (character-count estimate, not true tokens)
+            usage = CompletionUsage(
+                prompt_tokens=len(json.dumps(payload.get("chatHistory", []))),
+                completion_tokens=len(content),
+                total_tokens=len(json.dumps(payload.get("chatHistory", []))) + len(content)
+            )
+
+            # Create the completion object
+            completion = ChatCompletion(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                usage=usage,
+            )
+            return completion
+
+        except Exception as e:
+            print(f"Error during DeepAI non-stream request: {e}")
+            raise IOError(f"DeepAI request failed: {e}") from e
+
+class Chat(BaseChat):
+    def __init__(self, client: 'DeepAI'):
+        self.completions = Completions(client)
+
+class DeepAI(OpenAICompatibleProvider):
+    AVAILABLE_MODELS = [
+        "standard",
+        "genius",
+        "online",
+        "supergenius",
+        "onlinegenius",
+        "deepseek-v3.2",
+        "gemini-2.5-flash-lite",
+        "qwen3-30b-a3b",
+        "gpt-5-nano",
+        "gpt-oss-120b",
+        "gpt-5-chat-latest",
+        "claude-opus-4-1",
+        "llama-4-scout",
+        "claude-4.5-sonnet",
+        "deepseek-v3.1-terminus",
+        "llama-3.3-70b-instruct",
+        "grok-4",
+        "claude-sonnet-4",
+        "qwen3-coder",
+        "gpt-5",
+        "kimi-k2-0905",
+        "claude-opus-4",
+        "gpt-5-mini",
+        "gemini-2.5-pro",
+        "grok-code-fast-1",
+        "gpt-4.1",
+    ]
+
+    def __init__(
+        self,
+        api_key: str = "tryit-53926507126-2c8a2543c7b5638ca6b92b6e53ef2d2b",
+        timeout: Optional[int] = 30,
+        browser: str = "chrome",
+        model: str = "standard",
+        chat_style: str = "chat",
+        enabled_tools: Optional[List[str]] = None,
+        **kwargs
+    ):
+        self.timeout = timeout
+        self.api_key = api_key
+        self.model = model
+        self.chat_style = chat_style
+        self.enabled_tools = enabled_tools or ["image_generator"]
+
+        # Initialize requests Session
+        self.session = requests.Session()
+
+        # Set up headers with API key
+        self.headers = {
+            "Content-Type": "application/x-www-form-urlencoded",
+            "api-key": self.api_key,
+            "Accept": "text/plain, */*",
+            "Accept-Encoding": "gzip, deflate, br",
+            "Accept-Language": "en-US,en;q=0.9",
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
+        }
+
+        # Update session headers
+        self.session.headers.update(self.headers)
+
+        # Initialize chat interface
+        self.chat = Chat(self)
+
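A construction sketch showing the configurable knobs above (model, chat_style, enabled_tools, timeout); the non-default values are illustrative only:

    client = DeepAI(
        model="genius",        # default model hint stored on the client
        chat_style="chat",     # forwarded as the chat_style form field
        enabled_tools=[],      # disable the default ["image_generator"]
        timeout=60,            # per-request timeout in seconds
    )
    # The api-key header is already applied to the underlying requests.Session.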
+    @classmethod
+    def get_models(cls, api_key: str = None):
+        """Return the available DeepAI model IDs.
+
+        Args:
+            api_key (str, optional): Accepted for interface compatibility;
+                the static AVAILABLE_MODELS list is returned regardless.
+
+        Returns:
+            list: List of available model IDs
+        """
+        return cls.AVAILABLE_MODELS
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
+
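Both accessors return the same static list; no network call is made despite the OpenAI-style models.list() shape:

    print(DeepAI.get_models()[:3])     # classmethod: first three model IDs
    print(DeepAI().models.list()[:3])  # property wrapper: identical data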
+if __name__ == "__main__":
+    client = DeepAI()
+    response = client.chat.completions.create(
+        model="standard",
+        messages=[{"role": "user", "content": "Hello!"}],
+        stream=False
+    )
+    print(response.choices[0].message.content)
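The demo above exercises the non-streaming path with defaults; as a variation, the optional sampling parameters can be passed and are forwarded into the form payload (whether the backend honors them is not verified here):

    client = DeepAI()
    response = client.chat.completions.create(
        model="standard",
        messages=[{"role": "user", "content": "Summarize this diff in one line."}],
        temperature=0.7,   # forwarded as a form field; backend support unverified
        max_tokens=256,
    )
    print(response.choices[0].message.content)
    print(response.usage)  # character-count estimates, not true token counts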