webscout-7.9-py3-none-any.whl → webscout-8.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/Extra/GitToolkit/__init__.py +10 -0
- webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
- webscout/Extra/GitToolkit/gitapi/user.py +96 -0
- webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
- webscout/Extra/YTToolkit/ytapi/video.py +232 -103
- webscout/Provider/AISEARCH/DeepFind.py +1 -1
- webscout/Provider/AISEARCH/ISou.py +1 -1
- webscout/Provider/AISEARCH/__init__.py +6 -1
- webscout/Provider/AISEARCH/felo_search.py +1 -1
- webscout/Provider/AISEARCH/genspark_search.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +194 -0
- webscout/Provider/AISEARCH/iask_search.py +436 -0
- webscout/Provider/AISEARCH/monica_search.py +246 -0
- webscout/Provider/AISEARCH/scira_search.py +320 -0
- webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
- webscout/Provider/AllenAI.py +255 -122
- webscout/Provider/DeepSeek.py +1 -2
- webscout/Provider/Deepinfra.py +17 -9
- webscout/Provider/ExaAI.py +261 -0
- webscout/Provider/ExaChat.py +8 -1
- webscout/Provider/GithubChat.py +2 -1
- webscout/Provider/Jadve.py +2 -2
- webscout/Provider/Netwrck.py +3 -2
- webscout/Provider/OPENAI/__init__.py +17 -0
- webscout/Provider/OPENAI/base.py +46 -0
- webscout/Provider/OPENAI/c4ai.py +347 -0
- webscout/Provider/OPENAI/chatgptclone.py +460 -0
- webscout/Provider/OPENAI/deepinfra.py +284 -0
- webscout/Provider/OPENAI/exaai.py +419 -0
- webscout/Provider/OPENAI/exachat.py +421 -0
- webscout/Provider/OPENAI/freeaichat.py +355 -0
- webscout/Provider/OPENAI/glider.py +314 -0
- webscout/Provider/OPENAI/heckai.py +337 -0
- webscout/Provider/OPENAI/llmchatco.py +325 -0
- webscout/Provider/OPENAI/netwrck.py +348 -0
- webscout/Provider/OPENAI/scirachat.py +459 -0
- webscout/Provider/OPENAI/sonus.py +294 -0
- webscout/Provider/OPENAI/typegpt.py +361 -0
- webscout/Provider/OPENAI/utils.py +211 -0
- webscout/Provider/OPENAI/venice.py +428 -0
- webscout/Provider/OPENAI/wisecat.py +381 -0
- webscout/Provider/OPENAI/x0gpt.py +389 -0
- webscout/Provider/OPENAI/yep.py +329 -0
- webscout/Provider/OpenGPT.py +199 -0
- webscout/Provider/PI.py +39 -24
- webscout/Provider/Venice.py +1 -1
- webscout/Provider/Youchat.py +326 -296
- webscout/Provider/__init__.py +16 -6
- webscout/Provider/ai4chat.py +58 -56
- webscout/Provider/akashgpt.py +34 -22
- webscout/Provider/freeaichat.py +1 -1
- webscout/Provider/labyrinth.py +121 -20
- webscout/Provider/llmchatco.py +306 -0
- webscout/Provider/scira_chat.py +274 -0
- webscout/Provider/typefully.py +280 -0
- webscout/Provider/typegpt.py +3 -184
- webscout/prompt_manager.py +2 -1
- webscout/version.py +1 -1
- webscout/webscout_search.py +118 -54
- webscout/webscout_search_async.py +109 -45
- webscout-8.1.dist-info/METADATA +683 -0
- {webscout-7.9.dist-info → webscout-8.1.dist-info}/RECORD +67 -33
- webscout/Provider/flowith.py +0 -207
- webscout-7.9.dist-info/METADATA +0 -995
- {webscout-7.9.dist-info → webscout-8.1.dist-info}/LICENSE.md +0 -0
- {webscout-7.9.dist-info → webscout-8.1.dist-info}/WHEEL +0 -0
- {webscout-7.9.dist-info → webscout-8.1.dist-info}/entry_points.txt +0 -0
- {webscout-7.9.dist-info → webscout-8.1.dist-info}/top_level.txt +0 -0
webscout/Provider/AllenAI.py
CHANGED
```diff
@@ -17,13 +17,33 @@ class AllenAI(Provider):
     """

     AVAILABLE_MODELS = [
+        'OLMo-2-1124-13B-Instruct',
+        'Llama-3-1-Tulu-3-8B',
+        'olmo-2-0325-32b-instruct',
+        'Llama-3-1-Tulu-3-70B',
+        'OLMoE-1B-7B-0924-Instruct',
         'tulu3-405b',
-
-
-
-        # 'olmoe-0125'
+        'olmo-2-0325-32b-instruct',
+        'tulu-3-1-8b',
+        'olmoe-0125'
     ]

+    # Default model options from JS implementation
+    DEFAULT_OPTIONS = {
+        "max_tokens": 2048,
+        "temperature": 0.7,
+        "top_p": 1,
+        "n": 1,
+        "stop": None,
+        "logprobs": None
+    }
+
+    # Host mapping for models - some models work best with specific hosts
+    MODEL_HOST_MAP = {
+        'tulu3-405b': 'inferd',
+        'tulu2': 'inferd',
+        'olmo-7b-instruct': 'inferd'
+    }

     def __init__(
         self,
@@ -36,45 +56,61 @@ class AllenAI(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "
-
+        model: str = "OLMo-2-1124-13B-Instruct",
+        host: str = None  # Now optional - will auto-detect if not provided
     ):
         """Initializes the AllenAI API client."""
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

         self.url = "https://playground.allenai.org"
-
+        # Updated API endpoint to v3 from v4
+        self.api_endpoint = "https://olmo-api.allen.ai/v3/message/stream"
+        self.whoami_endpoint = "https://olmo-api.allen.ai/v3/whoami"

-        #
+        # Updated headers based on JS implementation
         self.headers = {
-            'User-Agent':
+            'User-Agent': "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Mobile Safari/537.36",
             'Accept': '*/*',
-            'Accept-Language': '
+            'Accept-Language': 'id-ID,id;q=0.9',
             'Origin': self.url,
             'Referer': f"{self.url}/",
             'Connection': 'keep-alive',
             'Cache-Control': 'no-cache',
             'Pragma': 'no-cache',
+            'Priority': 'u=1, i',
             'Sec-Fetch-Dest': 'empty',
             'Sec-Fetch-Mode': 'cors',
-            'Sec-Fetch-Site': '
-            'sec-ch-ua': '"
-            'sec-ch-ua-mobile': '?
-            'sec-ch-ua-platform': '"
+            'Sec-Fetch-Site': 'cross-site',
+            'sec-ch-ua': '"Chromium";v="131", "Not_A Brand";v="24", "Microsoft Edge Simulate";v="131", "Lemur";v="131"',
+            'sec-ch-ua-mobile': '?1',
+            'sec-ch-ua-platform': '"Android"',
+            'Content-Type': 'application/json'
         }

         self.session = requests.Session()
         self.session.headers.update(self.headers)
         self.session.proxies.update(proxies)
         self.model = model
+
+        # Auto-detect host if not provided
+        if not host:
+            # Use the preferred host from the model-host map, or default to modal
+            self.host = self.MODEL_HOST_MAP.get(model, 'modal')
+        else:
+            self.host = host
+
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.timeout = timeout
         self.last_response = {}
         # Generate user ID if needed
-        self.x_anonymous_user_id =
+        self.x_anonymous_user_id = None
         self.parent = None
+
+        # Default options
+        self.options = self.DEFAULT_OPTIONS.copy()
+        self.options["max_tokens"] = max_tokens

         self.__available_optimizers = (
             method
@@ -94,6 +130,44 @@ class AllenAI(Provider):
         )
         self.conversation.history_offset = history_offset

+    def whoami(self):
+        """Gets or creates a user ID for authentication with Allen AI API"""
+        temp_id = str(uuid4())
+        headers = self.session.headers.copy()
+        headers.update({"x-anonymous-user-id": temp_id})
+
+        try:
+            response = self.session.get(
+                self.whoami_endpoint,
+                headers=headers,
+                timeout=self.timeout
+            )
+
+            if response.status_code == 200:
+                data = response.json()
+                self.x_anonymous_user_id = data.get("client", temp_id)
+                return data
+            else:
+                self.x_anonymous_user_id = temp_id
+                return {"client": temp_id}
+
+        except Exception as e:
+            self.x_anonymous_user_id = temp_id
+            return {"client": temp_id, "error": str(e)}
+
+
+    def parse_stream(self, raw_data):
+        """Parse the raw streaming data according to the JS implementation"""
+        result = ""
+        for line in raw_data.splitlines():
+            try:
+                parsed = json.loads(line)
+                # Check if message starts with msg_ pattern
+                if parsed.get("message", "").startswith("msg_"):
+                    result += parsed.get("content", "")
+            except:
+                continue
+        return result

     def ask(
         self,
@@ -102,10 +176,11 @@ class AllenAI(Provider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-        host: str =
-        private: bool =
+        host: str = None,
+        private: bool = False,
         top_p: float = None,
         temperature: float = None,
+        options: dict = None,
     ) -> Union[Dict[str, Any], Generator]:
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -116,126 +191,163 @@ class AllenAI(Provider):
             else:
                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

-        #
-
+        # Ensure we have a user ID
+        if not self.x_anonymous_user_id:
+            self.whoami()

-        #
+        # Prepare the API request
         self.session.headers.update({
-            "
-            "
+            "x-anonymous-user-id": self.x_anonymous_user_id,
+            "Content-Type": "application/json"
         })

-
-
+        # Create options dictionary
+        opts = self.options.copy()
+        if temperature is not None:
+            opts["temperature"] = temperature
+        if top_p is not None:
+            opts["top_p"] = top_p
+        if options:
+            opts.update(options)

-        #
-
-            f'--{boundary}\r\n'
-            f'Content-Disposition: form-data; name="model"\r\n\r\n{self.model}\r\n',
-
-            f'--{boundary}\r\n'
-            f'Content-Disposition: form-data; name="host"\r\n\r\n{host}\r\n',
-
-            f'--{boundary}\r\n'
-            f'Content-Disposition: form-data; name="content"\r\n\r\n{messages}\r\n',
-
-            f'--{boundary}\r\n'
-            f'Content-Disposition: form-data; name="private"\r\n\r\n{str(private).lower()}\r\n'
-        ]
+        # Use the host param or the default host
+        use_host = host or self.host

-        #
-
-
-
-        )
+        # List of hosts to try - start with provided host, then try alternative hosts
+        hosts_to_try = [use_host]
+        if use_host == 'modal':
+            hosts_to_try.append('inferd')
+        else:
+            hosts_to_try.append('modal')

-
-        if temperature is not None:
-            form_data.append(
-                f'--{boundary}\r\n'
-                f'Content-Disposition: form-data; name="temperature"\r\n\r\n{temperature}\r\n'
-            )
+        last_error = None

-
-
-
-
-
+        # Try each host until one works
+        for current_host in hosts_to_try:
+            # Create the JSON payload as per the JS implementation
+            payload = {
+                "content": conversation_prompt,
+                "private": private,
+                "model": self.model,
+                "host": current_host,
+                "opts": opts
+            }
+
+            # Add parent if exists
+            if self.parent:
+                payload["parent"] = self.parent
+
+            try:
+                if stream:
+                    return self._stream_request(payload, prompt, raw)
+                else:
+                    return self._non_stream_request(payload, prompt)
+            except exceptions.FailedToGenerateResponseError as e:
+                last_error = e
+                # Log the error but continue to try other hosts
+                print(f"Host '{current_host}' failed for model '{self.model}', trying next host...")
+                continue

-
-
+        # If we've tried all hosts and none worked, raise the last error
+        raise last_error or exceptions.FailedToGenerateResponseError("All hosts failed. Unable to complete request.")

-
-
-
-
-
-
-
-
+    def _stream_request(self, payload, prompt, raw=False):
+        """Handle streaming requests with the given payload"""
+        try:
+            response = self.session.post(
+                self.api_endpoint,
+                json=payload,
+                stream=True,
+                timeout=self.timeout
+            )
+
+            if response.status_code != 200:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Request failed with status code {response.status_code}: {response.text}"
                 )
-
-
-
-
-
-
-
-
-
-            for
-
+
+            streaming_text = ""
+            current_parent = None
+
+            for chunk in response.iter_content(chunk_size=1024, decode_unicode=False):
+                if not chunk:
+                    continue
+
+                decoded = chunk.decode(errors="ignore")
+                for line in decoded.splitlines():
+                    line = line.strip()
+                    if not line:
+                        continue
+
+                    try:
+                        data = json.loads(line)
+                    except json.JSONDecodeError:
                         continue
+
+                    if isinstance(data, dict):
+                        # Check for message pattern from JS implementation
+                        if data.get("message", "").startswith("msg_") and "content" in data:
+                            content = data.get("content", "")
+                            if content:
+                                streaming_text += content
+                                resp = dict(text=content)
+                                yield resp if raw else resp

-
-
-
-
+                        # Legacy handling for older API
+                        elif "message" in data and data.get("content"):
+                            content = data.get("content")
+                            if content.strip():
+                                streaming_text += content
+                                resp = dict(text=content)
+                                yield resp if raw else resp

-
-
-
-
+                        # Update parent ID if present
+                        if data.get("id"):
+                            current_parent = data.get("id")
+                        elif data.get("children"):
+                            for child in data["children"]:
+                                if child.get("role") == "assistant":
+                                    current_parent = child.get("id")
+                                    break

-
-
-            if
-
-                if child.get("role") == "assistant":
-                    current_parent = child.get("id")
-                    break
+                        # Handle completion
+                        if data.get("final") or data.get("finish_reason") == "stop":
+                            if current_parent:
+                                self.parent = current_parent

-            #
-
-
-
-
-
-
-
-            # Handle completion
-            if data.get("final") or data.get("finish_reason") == "stop":
-                if current_parent:
-                    self.parent = current_parent
-
-            # Update conversation history
-            self.conversation.update_chat_history(prompt, streaming_text)
-            self.last_response = {"text": streaming_text}
-            return
-
-        except requests.RequestException as e:
-            raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+            # Update conversation history
+            self.conversation.update_chat_history(prompt, streaming_text)
+            self.last_response = {"text": streaming_text}
+            return
+
+        except requests.RequestException as e:
+            raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")

-
-
-
-
-
+    def _non_stream_request(self, payload, prompt):
+        """Handle non-streaming requests with the given payload"""
+        try:
+            # For non-streaming requests, we can directly send without stream=True
+            response = self.session.post(
+                self.api_endpoint,
+                json=payload,
+                stream=False,
+                timeout=self.timeout
+            )
+
+            if response.status_code != 200:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Request failed with status code {response.status_code}: {response.text}"
+                )
+
+            # Parse the response as per JS implementation
+            raw_response = response.text
+            parsed_response = self.parse_stream(raw_response)
+            self.conversation.update_chat_history(prompt, parsed_response)
+            self.last_response = {"text": parsed_response}
             return self.last_response
-
-
+
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")

     def chat(
         self,
@@ -243,13 +355,29 @@ class AllenAI(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
+        host: str = None,
+        options: dict = None,
     ) -> str:
         def for_stream():
-            for response in self.ask(
+            for response in self.ask(
+                prompt,
+                True,
+                optimizer=optimizer,
+                conversationally=conversationally,
+                host=host,
+                options=options
+            ):
                 yield self.get_message(response)
         def for_non_stream():
             return self.get_message(
-                self.ask(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                    host=host,
+                    options=options
+                )
             )
         return for_stream() if stream else for_non_stream()

@@ -257,6 +385,8 @@ class AllenAI(Provider):
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]

+
+
 if __name__ == "__main__":
     print("-" * 80)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
@@ -264,7 +394,9 @@ if __name__ == "__main__":

     for model in AllenAI.AVAILABLE_MODELS:
         try:
+            # Auto-detect host
             test_ai = AllenAI(model=model, timeout=60)
+            # Pass the host explicitly to display accurate error messages
             response = test_ai.chat("Say 'Hello' in one word")
             response_text = response

@@ -272,9 +404,10 @@ if __name__ == "__main__":
                 status = "✓"
                 # Truncate response if too long
                 display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+                print(f"{model:<50} {status:<10} {display_text} (host: {test_ai.host})")
             else:
                 status = "✗"
                 display_text = "Empty or invalid response"
-
+                print(f"{model:<50} {status:<10} {display_text}")
         except Exception as e:
             print(f"{model:<50} {'✗':<10} {str(e)}")
```
webscout/Provider/DeepSeek.py
CHANGED
```diff
@@ -1,12 +1,11 @@
 import requests
 import json
-from typing import
+from typing import Any, Dict
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout.litagent import LitAgent as Lit

 class DeepSeek(Provider):
     """
```
webscout/Provider/Deepinfra.py
CHANGED
```diff
@@ -17,31 +17,40 @@ class DeepInfra(Provider):

     AVAILABLE_MODELS = [
         # "anthropic/claude-3-7-sonnet-latest", # >>>> NOT WORKING
+
         "deepseek-ai/DeepSeek-R1",
-        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
-        "meta-llama/Llama-4-Scout-17B-16E-Instruct",
         "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
         "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
         "deepseek-ai/DeepSeek-R1-Turbo",
         "deepseek-ai/DeepSeek-V3",
+
         "google/gemma-2-27b-it",
         "google/gemma-2-9b-it",
         "google/gemma-3-27b-it",
+        "google/gemma-3-12b-it",
+        "google/gemma-3-4b-it",
         # "google/gemini-1.5-flash", # >>>> NOT WORKING
         # "google/gemini-1.5-flash-8b", # >>>> NOT WORKING
         # "google/gemini-2.0-flash-001", # >>>> NOT WORKING
+
         # "Gryphe/MythoMax-L2-13b", # >>>> NOT WORKING
+
         # "meta-llama/Llama-3.2-1B-Instruct", # >>>> NOT WORKING
         # "meta-llama/Llama-3.2-3B-Instruct", # >>>> NOT WORKING
-        "meta-llama/Llama-
-        "meta-llama/Llama-
+        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+        "meta-llama/Llama-4-Scout-17B-16E-Instruct",
+        # "meta-llama/Llama-3.2-90B-Vision-Instruct", # >>>> NOT WORKING
+        # "meta-llama/Llama-3.2-11B-Vision-Instruct", # >>>> NOT WORKING
+        "meta-llama/Llama-3.3-70B-Instruct",
+        "meta-llama/Llama-3.3-70B-Instruct-Turbo",
         # "meta-llama/Meta-Llama-3-70B-Instruct", # >>>> NOT WORKING
         # "meta-llama/Meta-Llama-3-8B-Instruct", # >>>> NOT WORKING
         # "meta-llama/Meta-Llama-3.1-70B-Instruct", # >>>> NOT WORKING
-        "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
+        # "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", # >>>> NOT WORKING
         "meta-llama/Meta-Llama-3.1-8B-Instruct",
         "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
         # "meta-llama/Meta-Llama-3.1-405B-Instruct", # >>>> NOT WORKING
+
         "microsoft/phi-4",
         "microsoft/Phi-4-multimodal-instruct",
         "microsoft/WizardLM-2-8x22B",
@@ -58,8 +67,6 @@ class DeepInfra(Provider):
         "Qwen/Qwen2.5-Coder-32B-Instruct",
         # "Sao10K/L3.1-70B-Euryale-v2.2", # >>>> NOT WORKING
         # "Sao10K/L3.3-70B-Euryale-v2.3", # >>>> NOT WORKING
-        "meta-llama/Llama-3.3-70B-Instruct",
-        "meta-llama/Llama-3.3-70B-Instruct-Turbo",
     ]

     def __init__(
@@ -74,6 +81,7 @@ class DeepInfra(Provider):
         history_offset: int = 10250,
         act: str = None,
         model: str = "meta-llama/Llama-3.3-70B-Instruct-Turbo",
+        system_prompt: str = "You are a helpful assistant.",
         browser: str = "chrome"
     ):
         """Initializes the DeepInfra API client."""
@@ -111,7 +119,7 @@ class DeepInfra(Provider):
         self.session = requests.Session()
         self.session.headers.update(self.headers)
         self.session.proxies.update(proxies)
-
+        self.system_prompt = system_prompt
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.timeout = timeout
@@ -182,7 +190,7 @@ class DeepInfra(Provider):
         payload = {
             "model": self.model,
             "messages": [
-                {"role": "system", "content":
+                {"role": "system", "content": self.system_prompt},
                 {"role": "user", "content": conversation_prompt},
             ],
             "stream": stream
```
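Besides the reshuffled model list, DeepInfra's one behavioral change is the new `system_prompt` constructor argument, which replaces the previously hard-coded system message in the request payload. A minimal sketch, assuming the standard Provider `chat()` interface and an illustrative prompt:

```python
# Sketch of DeepInfra's new system_prompt parameter in webscout 8.1
# (import path assumed from the file layout).
from webscout.Provider.Deepinfra import DeepInfra

bot = DeepInfra(
    model="meta-llama/Llama-3.3-70B-Instruct-Turbo",  # default per the diff
    system_prompt="You are a terse assistant. Answer in one sentence.",
)

# Per the payload change above, every request now sends
# [{"role": "system", "content": bot.system_prompt},
#  {"role": "user", "content": <conversation prompt>}].
print(bot.chat("What is a Python wheel?"))
```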