webscout-8.2.3-py3-none-any.whl → webscout-8.2.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- inferno/lol.py +589 -0
- webscout/AIutel.py +226 -14
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/Extra/tempmail/base.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +4 -0
- webscout/Provider/AllenAI.py +163 -126
- webscout/Provider/ChatGPTClone.py +96 -84
- webscout/Provider/Deepinfra.py +95 -67
- webscout/Provider/ElectronHub.py +55 -0
- webscout/Provider/GPTWeb.py +96 -46
- webscout/Provider/Groq.py +194 -91
- webscout/Provider/HeckAI.py +89 -47
- webscout/Provider/HuggingFaceChat.py +113 -106
- webscout/Provider/Hunyuan.py +94 -83
- webscout/Provider/Jadve.py +107 -75
- webscout/Provider/LambdaChat.py +106 -64
- webscout/Provider/Llama3.py +94 -39
- webscout/Provider/MCPCore.py +318 -0
- webscout/Provider/Marcus.py +85 -36
- webscout/Provider/Netwrck.py +76 -43
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +286 -0
- webscout/Provider/OPENAI/chatgptclone.py +35 -14
- webscout/Provider/OPENAI/deepinfra.py +37 -0
- webscout/Provider/OPENAI/groq.py +354 -0
- webscout/Provider/OPENAI/heckai.py +6 -2
- webscout/Provider/OPENAI/mcpcore.py +376 -0
- webscout/Provider/OPENAI/multichat.py +368 -0
- webscout/Provider/OPENAI/netwrck.py +3 -1
- webscout/Provider/OpenGPT.py +48 -38
- webscout/Provider/PI.py +168 -92
- webscout/Provider/PizzaGPT.py +66 -36
- webscout/Provider/TeachAnything.py +85 -51
- webscout/Provider/TextPollinationsAI.py +109 -51
- webscout/Provider/TwoAI.py +109 -60
- webscout/Provider/Venice.py +93 -56
- webscout/Provider/VercelAI.py +2 -2
- webscout/Provider/WiseCat.py +65 -28
- webscout/Provider/Writecream.py +37 -11
- webscout/Provider/WritingMate.py +135 -63
- webscout/Provider/__init__.py +3 -21
- webscout/Provider/ai4chat.py +6 -7
- webscout/Provider/copilot.py +0 -3
- webscout/Provider/elmo.py +101 -58
- webscout/Provider/granite.py +91 -46
- webscout/Provider/hermes.py +87 -47
- webscout/Provider/koala.py +1 -1
- webscout/Provider/learnfastai.py +104 -50
- webscout/Provider/llama3mitril.py +86 -51
- webscout/Provider/llmchat.py +88 -46
- webscout/Provider/llmchatco.py +74 -49
- webscout/Provider/meta.py +41 -37
- webscout/Provider/multichat.py +54 -25
- webscout/Provider/scnet.py +93 -43
- webscout/Provider/searchchat.py +82 -75
- webscout/Provider/sonus.py +103 -51
- webscout/Provider/toolbaz.py +132 -77
- webscout/Provider/turboseek.py +92 -41
- webscout/Provider/tutorai.py +82 -64
- webscout/Provider/typefully.py +75 -33
- webscout/Provider/typegpt.py +96 -35
- webscout/Provider/uncovr.py +112 -62
- webscout/Provider/x0gpt.py +69 -26
- webscout/Provider/yep.py +79 -66
- webscout/conversation.py +35 -21
- webscout/exceptions.py +20 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/METADATA +22 -10
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/RECORD +78 -81
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/WHEEL +1 -1
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llamatutor.py +0 -192
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info/licenses}/LICENSE.md +0 -0
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/top_level.txt +0 -0
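
The two file diffs shown in full below (toolbaz.py and turboseek.py) share one core change: a plain HTTP session is swapped for a curl_cffi session, which keeps a requests-style API but accepts an impersonate argument so the TLS/HTTP fingerprint matches a real browser build. A minimal standalone sketch of that pattern, using an illustrative endpoint and payload rather than anything from this release:

from curl_cffi.requests import Session
from curl_cffi import CurlError

session = Session()
session.headers.update({"accept": "*/*"})
session.proxies = {}  # the updated providers assign proxies directly like this

try:
    resp = session.post(
        "https://example.com/api",  # illustrative URL, not from the diff
        data={"q": "hello"},
        timeout=30,
        impersonate="chrome110",    # fingerprint profile used in toolbaz.py
    )
    resp.raise_for_status()
    print(resp.text)
except CurlError as e:              # network-level failures surface as CurlError
    print(f"Request failed (CurlError): {e}")

Both rewrites follow this shape: build the Session once in __init__, attach headers and proxies to it, and catch CurlError separately from generic exceptions around each request.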
webscout/Provider/toolbaz.py
CHANGED
@@ -1,5 +1,6 @@
 import re
-import
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import uuid
 import base64
 import json
@@ -9,11 +10,11 @@ import time
 from datetime import datetime
 from typing import Any, Dict, Optional, Generator, Union, List

+from webscout import exceptions
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
-from webscout.AIbase import Provider
-from webscout import exceptions
+from webscout.AIbase import Provider

 class Toolbaz(Provider):
     """
@@ -47,7 +48,7 @@ class Toolbaz(Provider):
     def __init__(
         self,
         is_conversation: bool = True,
-        max_tokens: int = 600,
+        max_tokens: int = 600, # Note: max_tokens is not directly used by the API
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -56,7 +57,7 @@
         history_offset: int = 10250,
         act: str = None,
         model: str = "gemini-2.0-flash",
-        system_prompt: str = "You are a helpful AI assistant."
+        system_prompt: str = "You are a helpful AI assistant." # Note: system_prompt is not directly used by the API
     ):
         """
         Initializes the Toolbaz API with given parameters.
@@ -64,28 +65,31 @@
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

-
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.timeout = timeout
         self.last_response = {}
         self.system_prompt = system_prompt
         self.model = model
-        self.proxies = proxies
+        self.proxies = proxies # Store proxies for later use in requests

-        # Set up headers
+        # Set up headers for the curl_cffi session
         self.session.headers.update({
-            "user-agent": "Mozilla/5.0 (Linux; Android 10)",
+            "user-agent": "Mozilla/5.0 (Linux; Android 10)", # Keep specific user-agent
             "accept": "*/*",
            "accept-language": "en-US",
            "cache-control": "no-cache",
-            "connection": "keep-alive",
            "content-type": "application/x-www-form-urlencoded; charset=UTF-8",
            "origin": "https://toolbaz.com",
            "pragma": "no-cache",
            "referer": "https://toolbaz.com/",
            "sec-fetch-mode": "cors"
+            # Add sec-ch-ua headers if needed for impersonation consistency
         })
+        # Assign proxies directly to the session
+        self.session.proxies = proxies

         # Initialize conversation history
         self.__available_optimizers = (
@@ -139,20 +143,34 @@ class Toolbaz(Provider):
                 "session_id": session_id,
                 "token": token
             }
-
-            resp.
+            # Use curl_cffi session post WITHOUT impersonate for token request
+            resp = self.session.post(
+                "https://data.toolbaz.com/token.php",
+                data=data
+                # Removed impersonate="chrome110" for this specific request
+            )
+            resp.raise_for_status() # Check for HTTP errors
             result = resp.json()
             if result.get("success"):
                 return {"token": result["token"], "session_id": session_id}
-
-
-
+            # Raise error if success is not true
+            raise exceptions.FailedToGenerateResponseError(f"Authentication failed: API response indicates failure. Response: {result}")
+        except CurlError as e: # Catch CurlError specifically
+            # Raise a specific error indicating CurlError during auth
+            raise exceptions.FailedToGenerateResponseError(f"Authentication failed due to network error (CurlError): {e}") from e
+        except json.JSONDecodeError as e:
+            # Raise error for JSON decoding issues
+            raise exceptions.FailedToGenerateResponseError(f"Authentication failed: Could not decode JSON response. Error: {e}. Response text: {getattr(resp, 'text', 'N/A')}") from e
+        except Exception as e: # Catch other potential errors (like HTTPError from raise_for_status)
+            # Raise a specific error indicating a general failure during auth
+            err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+            raise exceptions.FailedToGenerateResponseError(f"Authentication failed due to an unexpected error ({type(e).__name__}): {e} - {err_text}") from e

     def ask(
         self,
         prompt: str,
         stream: bool = False,
-        raw: bool = False, # Kept for compatibility
+        raw: bool = False, # Kept for compatibility, but output is always dict/string
         optimizer: Optional[str] = None,
         conversationally: bool = False,
     ) -> Union[Dict[str, Any], Generator]:
@@ -166,9 +184,9 @@
             conversation_prompt if conversationally else prompt
         )

-
-
-
+        # get_auth now raises exceptions on failure
+        auth = self.get_auth()
+        # No need to check if auth is None, as an exception would have been raised

         data = {
             "text": conversation_prompt,
@@ -179,12 +197,13 @@

         def for_stream():
             try:
+                # Use curl_cffi session post with impersonate for the main request
                 resp = self.session.post(
                     "https://data.toolbaz.com/writing.php",
                     data=data,
                     stream=True,
-
-
+                    timeout=self.timeout,
+                    impersonate="chrome110" # Keep impersonate here
                 )
                 resp.raise_for_status()

@@ -192,54 +211,71 @@
                 tag_start = "[model:"
                 streaming_text = ""

-
-
-
+                # Iterate over bytes and decode manually
+                for chunk_bytes in resp.iter_content(chunk_size=1024): # Read in larger chunks
+                    if chunk_bytes:
+                        text = chunk_bytes.decode(errors="ignore")
                         buffer += text
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+                        processed_buffer = ""
+                        last_processed_index = 0
+                        # Find all complete tags and process text between them
+                        for match in re.finditer(r"\[model:.*?\]", buffer):
+                            # Add text before the tag
+                            segment = buffer[last_processed_index:match.start()]
+                            if segment:
+                                processed_buffer += segment
+                            last_processed_index = match.end()
+
+                        # Add remaining text after the last complete tag
+                        processed_buffer += buffer[last_processed_index:]
+
+                        # Now, check for incomplete tag at the end
+                        last_tag_start_index = processed_buffer.rfind(tag_start)
+
+                        if last_tag_start_index != -1:
+                            # Text before the potential incomplete tag
+                            text_to_yield = processed_buffer[:last_tag_start_index]
+                            # Keep the potential incomplete tag start for the next iteration
+                            buffer = processed_buffer[last_tag_start_index:]
                         else:
-
-
-
-
-
-
-
-
-
-
+                            # No potential incomplete tag found, yield everything processed
+                            text_to_yield = processed_buffer
+                            buffer = "" # Clear buffer as everything is processed
+
+                        if text_to_yield:
+                            streaming_text += text_to_yield
+                            # Yield dict or raw string
+                            yield {"text": text_to_yield} if not raw else text_to_yield
+
+                # Process any remaining text in the buffer after the loop finishes
+                # Remove any potential tags (complete or incomplete)
+                final_text = re.sub(r"\[model:.*?\]", "", buffer)
+                if final_text:
+                    streaming_text += final_text
+                    yield {"text": final_text} if not raw else final_text

                 self.last_response = {"text": streaming_text}
                 self.conversation.update_chat_history(prompt, streaming_text)

-            except
-                raise exceptions.
-            except Exception as e:
-                raise exceptions.
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Network error (CurlError): {str(e)}") from e
+            except Exception as e: # Catch other exceptions
+                raise exceptions.FailedToGenerateResponseError(f"Unexpected error during stream: {str(e)}") from e

         def for_non_stream():
             try:
+                # Use curl_cffi session post with impersonate for the main request
                 resp = self.session.post(
                     "https://data.toolbaz.com/writing.php",
                     data=data,
-
-
+                    timeout=self.timeout,
+                    impersonate="chrome110" # Keep impersonate here
                 )
                 resp.raise_for_status()

-
+                # Use response.text which is already decoded
+                text = resp.text
                 # Remove [model: ...] tags
                 text = re.sub(r"\[model:.*?\]", "", text)

@@ -248,9 +284,9 @@ class Toolbaz(Provider):

                 return self.last_response

-            except
-                raise exceptions.FailedToGenerateResponseError(f"Network error: {str(e)}") from e
-            except Exception as e:
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Network error (CurlError): {str(e)}") from e
+            except Exception as e: # Catch other exceptions
                 raise exceptions.FailedToGenerateResponseError(f"Unexpected error: {str(e)}") from e

         return for_stream() if stream else for_non_stream()
@@ -263,26 +299,28 @@
         conversationally: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
         """Generates a response from the Toolbaz API."""
-        def
-
+        def for_stream_chat():
+            # ask() yields dicts when raw=False
+            for response_dict in self.ask(
                 prompt,
                 stream=True,
+                raw=False, # Ensure ask yields dicts
                 optimizer=optimizer,
                 conversationally=conversationally
             ):
-                yield self.get_message(
+                yield self.get_message(response_dict)

-        def
-
-
-
-
-
-
-            )
+        def for_non_stream_chat():
+            # ask() returns a dict when stream=False
+            response_dict = self.ask(
+                prompt,
+                stream=False,
+                optimizer=optimizer,
+                conversationally=conversationally,
             )
+            return self.get_message(response_dict)

-        return
+        return for_stream_chat() if stream else for_non_stream_chat()

     def get_message(self, response: Dict[str, Any]) -> str:
         """Extract the message from the response.
@@ -298,23 +336,40 @@ class Toolbaz(Provider):

 # Example usage
 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
+    from rich import print # Use rich print if available
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
     # Test the provider with different models
     for model in Toolbaz.AVAILABLE_MODELS:
         try:
             test_ai = Toolbaz(model=model, timeout=60)
-
+            # Test stream first
+            response_stream = test_ai.chat("Say 'Hello' in one word", stream=True)
             response_text = ""
-
+            # print(f"\r{model:<50} {'Streaming...':<10}", end="", flush=True)
+            for chunk in response_stream:
                 response_text += chunk
-                print
+                # Optional: print chunks for visual feedback
+                # print(chunk, end="", flush=True)

             if response_text and len(response_text.strip()) > 0:
                 status = "✓"
-                #
-
+                # Clean and truncate response
+                clean_text = response_text.strip()
+                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
             else:
-                status = "✗"
-                display_text = "Empty or invalid response"
+                status = "✗ (Stream)"
+                display_text = "Empty or invalid stream response"
             print(f"\r{model:<50} {status:<10} {display_text}")
+
+            # Optional: Add non-stream test if needed
+            # print(f"\r{model:<50} {'Non-Stream...':<10}", end="", flush=True)
+            # response_non_stream = test_ai.chat("Say 'Hi' again", stream=False)
+            # if not response_non_stream or len(response_non_stream.strip()) == 0:
+            #     print(f"\r{model:<50} {'✗ (Non-Stream)':<10} Empty non-stream response")
+
         except Exception as e:
-
+            # Print full error for debugging
+            print(f"\r{model:<50} {'✗':<10} Error: {str(e)}")
webscout/Provider/turboseek.py
CHANGED
@@ -1,10 +1,11 @@
-import
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json

 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
-from webscout.AIbase import Provider
+from webscout.AIbase import Provider
 from webscout import exceptions
 from typing import Union, Any, AsyncGenerator, Dict
 from webscout.litagent import LitAgent
@@ -26,7 +27,7 @@ class TurboSeek(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "Llama 3.1 70B"
+        model: str = "Llama 3.1 70B" # Note: model parameter is not used by the API endpoint
     ):
         """Instantiates TurboSeek

@@ -41,7 +42,8 @@ class TurboSeek(Provider):
             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
         """
-
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.chat_endpoint = "https://www.turboseek.io/api/getAnswer"
@@ -49,14 +51,9 @@ class TurboSeek(Provider):
         self.timeout = timeout
         self.last_response = {}
         self.headers = {
-            "authority": "www.turboseek.io",
-            "method": "POST",
-            "path": "/api/getAnswer",
-            "scheme": "https",
             "accept": "*/*",
             "accept-encoding": "gzip, deflate, br, zstd",
             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
-            "content-length": "63",
             "content-type": "application/json",
             "dnt": "1",
             "origin": "https://www.turboseek.io",
@@ -76,7 +73,9 @@ class TurboSeek(Provider):
             for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
+        self.session.proxies = proxies # Assign proxies directly
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -88,7 +87,6 @@ class TurboSeek(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-        self.session.proxies = proxies

     def ask(
         self,
@@ -125,41 +123,75 @@ class TurboSeek(Provider):
                     f"Optimizer is not one of {self.__available_optimizers}"
                 )

-        self.session.headers.update(self.headers)
         payload = {
             "question": conversation_prompt,
             "sources": []
         }

         def for_stream():
-
-
-
-
-
-
+            try: # Add try block for CurlError
+                # Use curl_cffi session post with impersonate
+                response = self.session.post(
+                    self.chat_endpoint,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome120", # Try a different impersonation profile
                 )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                if not response.ok:
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                    )
+                streaming_text = ""
+                # Iterate over bytes and decode manually
+                for value_bytes in response.iter_lines():
+                    try:
+                        if value_bytes and value_bytes.startswith(b"data: "): # Check for bytes
+                            # Decode bytes to string
+                            line = value_bytes[6:].decode('utf-8')
+                            data = json.loads(line)
+                            if "text" in data:
+                                # Decode potential unicode escapes
+                                content = data["text"].encode().decode('unicode_escape')
+                                streaming_text += content
+                                resp = dict(text=content)
+                                self.last_response.update(resp) # Update last_response incrementally
+                                # Yield raw bytes or dict based on flag
+                                yield value_bytes if raw else resp
+                    except (json.decoder.JSONDecodeError, UnicodeDecodeError):
+                        pass # Ignore lines that are not valid JSON or cannot be decoded
+                # Update conversation history after stream finishes
+                if streaming_text: # Only update if content was received
+                    self.conversation.update_chat_history(
+                        prompt, streaming_text # Use the fully aggregated text
+                    )
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+            except Exception as e: # Catch other potential exceptions
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
+

         def for_non_stream():
-
-
+            # Aggregate the stream using the updated for_stream logic
+            full_text = ""
+            for chunk_data in for_stream():
+                # Ensure chunk_data is a dict (not raw) and has 'text'
+                if isinstance(chunk_data, dict) and "text" in chunk_data:
+                    full_text += chunk_data["text"]
+                # If raw=True, chunk_data is bytes, decode and process if needed (though raw non-stream is less common)
+                elif isinstance(chunk_data, bytes):
+                    try:
+                        if chunk_data.startswith(b"data: "):
+                            line = chunk_data[6:].decode('utf-8')
+                            data = json.loads(line)
+                            if "text" in data:
+                                content = data["text"].encode().decode('unicode_escape')
+                                full_text += content
+                    except (json.decoder.JSONDecodeError, UnicodeDecodeError):
+                        pass
+            # last_response and history are updated within for_stream
+            # Ensure last_response reflects the complete aggregated text
+            self.last_response = {"text": full_text}
             return self.last_response

         return for_stream() if stream else for_non_stream()
@@ -209,11 +241,30 @@ class TurboSeek(Provider):
             str: Message extracted
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
-
+        # Text is already decoded in ask method
+        return response.get("text", "")
+
 if __name__ == '__main__':
+    # Ensure curl_cffi is installed
     from rich import print
-
-
-
+    try: # Add try-except block for testing
+        ai = TurboSeek(timeout=60)
+        print("[bold blue]Testing Stream:[/bold blue]")
+        response_stream = ai.chat("hello buddy", stream=True)
+        full_stream_response = ""
+        for chunk in response_stream:
+            print(chunk, end="", flush=True)
+            full_stream_response += chunk
+        print("\n[bold green]Stream Test Complete.[/bold green]\n")
+
+        # Optional: Test non-stream
+        # print("[bold blue]Testing Non-Stream:[/bold blue]")
+        # response_non_stream = ai.chat("What is the capital of France?", stream=False)
+        # print(response_non_stream)
+        # print("[bold green]Non-Stream Test Complete.[/bold green]")
+
+    except exceptions.FailedToGenerateResponseError as e:
+        print(f"\n[bold red]API Error:[/bold red] {e}")
+    except Exception as e:
+        print(f"\n[bold red]An unexpected error occurred:[/bold red] {e}")

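
The rewritten TurboSeek for_stream is an instance of a common server-sent-events parsing pattern: keep only lines that start with "data: ", JSON-decode the payload, and silently skip anything malformed. A condensed standalone sketch (the sample lines are synthetic; the live endpoint's exact framing may differ):

import json

def parse_sse_lines(lines):
    """Extract 'text' fields from byte lines shaped like b'data: {"text": ...}'."""
    for raw in lines:
        if not raw.startswith(b"data: "):
            continue                # skip keep-alives and blank lines
        try:
            data = json.loads(raw[6:].decode("utf-8"))
        except (json.JSONDecodeError, UnicodeDecodeError):
            continue                # ignore malformed lines, as the provider does
        if "text" in data:
            # Same escape handling as the diff: collapse literal sequences like \n.
            yield data["text"].encode().decode("unicode_escape")

sample = [
    b'data: {"text": "Hello"}',
    b": keep-alive",                # non-data line, skipped
    b'data: {"text": ", world"}',
    b"data: not-json",              # malformed payload, ignored
]
print("".join(parse_sse_lines(sample)))  # -> Hello, world

One caveat about the encode().decode('unicode_escape') round-trip the diff performs: it collapses literal escape sequences in the payload, but it can mangle non-ASCII characters, since the intermediate encode step produces UTF-8 while unicode_escape assumes Latin-1.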