webscout 8.2.3__py3-none-any.whl → 8.2.5__py3-none-any.whl
This diff reflects the published contents of two package versions as they appear in their public registry and is provided for informational purposes only.
Note: this release of webscout has been flagged as potentially problematic.
- webscout/AIutel.py +226 -14
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/Extra/gguf.py +2 -0
- webscout/Extra/tempmail/base.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +4 -0
- webscout/Provider/AISEARCH/scira_search.py +2 -5
- webscout/Provider/Aitopia.py +75 -51
- webscout/Provider/AllenAI.py +181 -147
- webscout/Provider/ChatGPTClone.py +97 -86
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +79 -32
- webscout/Provider/Deepinfra.py +135 -94
- webscout/Provider/ElectronHub.py +103 -39
- webscout/Provider/ExaChat.py +36 -20
- webscout/Provider/GPTWeb.py +103 -47
- webscout/Provider/GithubChat.py +52 -49
- webscout/Provider/GizAI.py +283 -0
- webscout/Provider/Glider.py +39 -28
- webscout/Provider/Groq.py +222 -91
- webscout/Provider/HeckAI.py +93 -69
- webscout/Provider/HuggingFaceChat.py +113 -106
- webscout/Provider/Hunyuan.py +94 -83
- webscout/Provider/Jadve.py +104 -79
- webscout/Provider/LambdaChat.py +142 -123
- webscout/Provider/Llama3.py +94 -39
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +95 -37
- webscout/Provider/Netwrck.py +94 -52
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +286 -0
- webscout/Provider/OPENAI/chatgptclone.py +35 -14
- webscout/Provider/OPENAI/deepinfra.py +37 -0
- webscout/Provider/OPENAI/exachat.py +4 -0
- webscout/Provider/OPENAI/groq.py +354 -0
- webscout/Provider/OPENAI/heckai.py +6 -2
- webscout/Provider/OPENAI/mcpcore.py +376 -0
- webscout/Provider/OPENAI/multichat.py +368 -0
- webscout/Provider/OPENAI/netwrck.py +3 -1
- webscout/Provider/OPENAI/scirachat.py +2 -4
- webscout/Provider/OPENAI/textpollinations.py +20 -22
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/OpenGPT.py +48 -38
- webscout/Provider/PI.py +178 -93
- webscout/Provider/PizzaGPT.py +66 -36
- webscout/Provider/StandardInput.py +42 -30
- webscout/Provider/TeachAnything.py +95 -52
- webscout/Provider/TextPollinationsAI.py +138 -78
- webscout/Provider/TwoAI.py +162 -81
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/Venice.py +97 -58
- webscout/Provider/VercelAI.py +33 -14
- webscout/Provider/WiseCat.py +65 -28
- webscout/Provider/Writecream.py +37 -11
- webscout/Provider/WritingMate.py +135 -63
- webscout/Provider/__init__.py +9 -27
- webscout/Provider/ai4chat.py +6 -7
- webscout/Provider/asksteve.py +53 -44
- webscout/Provider/cerebras.py +77 -31
- webscout/Provider/chatglm.py +47 -37
- webscout/Provider/copilot.py +0 -3
- webscout/Provider/elmo.py +109 -60
- webscout/Provider/granite.py +102 -54
- webscout/Provider/hermes.py +95 -48
- webscout/Provider/koala.py +1 -1
- webscout/Provider/learnfastai.py +113 -54
- webscout/Provider/llama3mitril.py +86 -51
- webscout/Provider/llmchat.py +88 -46
- webscout/Provider/llmchatco.py +110 -115
- webscout/Provider/meta.py +41 -37
- webscout/Provider/multichat.py +67 -28
- webscout/Provider/scira_chat.py +49 -30
- webscout/Provider/scnet.py +106 -53
- webscout/Provider/searchchat.py +87 -88
- webscout/Provider/sonus.py +113 -63
- webscout/Provider/toolbaz.py +115 -82
- webscout/Provider/turboseek.py +90 -43
- webscout/Provider/tutorai.py +82 -64
- webscout/Provider/typefully.py +85 -35
- webscout/Provider/typegpt.py +118 -61
- webscout/Provider/uncovr.py +132 -76
- webscout/Provider/x0gpt.py +69 -26
- webscout/Provider/yep.py +79 -66
- webscout/cli.py +256 -0
- webscout/conversation.py +34 -22
- webscout/exceptions.py +23 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
- webscout-8.2.5.dist-info/entry_points.txt +3 -0
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/gaurish.py +0 -244
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llamatutor.py +0 -192
- webscout-8.2.3.dist-info/entry_points.txt +0 -5
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/sonus.py
CHANGED

Summary: the provider drops requests in favor of curl_cffi (with Chrome impersonation), routes stream parsing through the shared sanitize_stream helper, and splits error handling into CurlError and generic branches. Removed lines whose content was not preserved in this extract appear as "…".

@@ -1,9 +1,10 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 from typing import Any, Dict, Optional, Generator, Union
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import AwesomePrompts, sanitize_stream  # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -21,7 +22,7 @@ class SonusAI(Provider):
     def __init__(
         self,
         is_conversation: bool = True,
-        max_tokens: int = 2049,
+        max_tokens: int = 2049,  # Note: max_tokens is not directly used by this API
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -44,11 +45,14 @@ class SonusAI(Provider):
             'Origin': 'https://chat.sonus.ai',
             'Referer': 'https://chat.sonus.ai/',
             'User-Agent': LitAgent().random()
+            # Add sec-ch-ua headers if needed for impersonation consistency
         }

-        … (old session setup; not preserved in this extract)
+        # Initialize curl_cffi Session
+        self.session = Session()
+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
-        self.session.proxies …
+        self.session.proxies = proxies  # Assign proxies directly

         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -74,6 +78,13 @@ class SonusAI(Provider):
         )
         self.conversation.history_offset = history_offset

+    @staticmethod
+    def _sonus_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from Sonus stream JSON objects."""
+        if isinstance(chunk, dict) and "content" in chunk:
+            return chunk.get("content")
+        return None
+
     def ask(
         self,
         prompt: str,
@@ -92,72 +103,101 @@ class SonusAI(Provider):
         else:
             raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

-        # Prepare the multipart form data
-        … (5 lines of the old multipart field definitions; truncated in this extract)
+        # Prepare the multipart form data (curl_cffi handles tuples for files/data)
+        # No need for explicit (None, ...) for simple fields when using `data=`
+        form_data = {
+            'message': conversation_prompt,
+            'history': "",  # Explicitly empty string if needed, or omit if None is acceptable
+            'reasoning': str(reasoning).lower(),
+            'model': self.model
         }
+        # Note: curl_cffi's `files` parameter is for actual file uploads.
+        # For simple key-value pairs like this, `data` is usually sufficient for multipart/form-data.
+        # If the server strictly requires `files`, keep the original structure but it might not work as expected with curl_cffi without actual file objects.

         def for_stream():
             try:
-                … (26 lines of the old requests-based streaming loop; not preserved in this extract)
+                # Use curl_cffi session post with impersonate
+                # Use `data` instead of `files` for simple key-value multipart
+                response = self.session.post(
+                    self.url,
+                    # headers are set on the session
+                    data=form_data,  # Use data for multipart form fields
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome110"  # Use a common impersonation profile
+                )
+                if response.status_code != 200:
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Request failed with status code {response.status_code} - {response.text}"
+                    )
+
+                streaming_text = ""
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None),  # Pass byte iterator
+                    intro_value="data:",
+                    to_json=True,  # Stream sends JSON
+                    content_extractor=self._sonus_extractor,  # Use the specific extractor
+                    yield_raw_on_error=False  # Skip non-JSON lines or lines where extractor fails
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by _sonus_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield dict(text=content_chunk) if not raw else content_chunk
+
+                # Update history and last response after stream finishes
+                self.last_response = {"text": streaming_text}
+                self.conversation.update_chat_history(prompt, streaming_text)

-            except …
-                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+            except CurlError as e:  # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
+            except Exception as e:  # Catch other potential exceptions
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}") from e

         def for_non_stream():
             try:
-                … (old request call; not preserved in this extract)
+                # Use curl_cffi session post with impersonate
+                response = self.session.post(
+                    self.url,
+                    # headers are set on the session
+                    data=form_data,  # Use data for multipart form fields
+                    timeout=self.timeout,
+                    impersonate="chrome110"  # Use a common impersonation profile
+                )
                 if response.status_code != 200:
                     raise exceptions.FailedToGenerateResponseError(
-                        f"Request failed with status code {response.status_code}"
+                        f"Request failed with status code {response.status_code} - {response.text}"
                     )

+                response_text_raw = response.text  # Get raw text
+
+                # Use sanitize_stream to process the non-streaming text
+                processed_stream = sanitize_stream(
+                    data=response_text_raw.splitlines(),  # Split into lines
+                    intro_value="data:",
+                    to_json=True,
+                    content_extractor=self._sonus_extractor,
+                    yield_raw_on_error=False
+                )
+
+                # Aggregate the results
                 full_response = ""
-                for …
-                    if …
-                        try:
-                            line = line.decode('utf-8')
-                            if line.startswith('data: '):
-                                line = line[6:]
-                            data = json.loads(line)
-                            if "content" in data:
-                                full_response += data["content"]
-                        except (json.JSONDecodeError, UnicodeDecodeError):
-                            continue
+                for content in processed_stream:
+                    if content and isinstance(content, str):
+                        full_response += content

                 self.last_response = {"text": full_response}
                 self.conversation.update_chat_history(prompt, full_response)
-                … (3 lines; not preserved in this extract)
+                # Return dict or raw string
+                return full_response if raw else {"text": full_response}
+
+            except CurlError as e:  # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
+            except Exception as e:  # Catch other potential exceptions
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}") from e

         return for_stream() if stream else for_non_stream()

@@ -169,20 +209,30 @@ class SonusAI(Provider):
         conversationally: bool = False,
         reasoning: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
-        def …
-        … (5 lines of the old stream/non-stream helpers; not preserved in this extract)
+        def for_stream_chat():
+            # ask() yields dicts when raw=False
+            for response_dict in self.ask(
+                prompt, stream=True, raw=False,  # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally, reasoning=reasoning
+            ):
+                yield self.get_message(response_dict)
+
+        def for_non_stream_chat():
+            # ask() returns dict or str when raw=False/True
+            response_data = self.ask(
+                prompt, stream=False, raw=False,  # Ensure ask returns dict
+                optimizer=optimizer, conversationally=conversationally, reasoning=reasoning
             )
-        …
+            return self.get_message(response_data)  # get_message expects dict
+
+        return for_stream_chat() if stream else for_non_stream_chat()

     def get_message(self, response: dict) -> str:
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]

 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     print("-" * 80)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
     print("-" * 80)
@@ -205,4 +255,4 @@ if __name__ == "__main__":
                 display_text = "Empty or invalid response"
             print(f"\r{model:<50} {status:<10} {display_text}")
         except Exception as e:
-            print(f"\r{model:<50} {'✗':<10} {str(e)}")
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
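Both rewritten providers lean on the same sanitize_stream contract: feed it a byte iterator (or lines), give it a prefix to strip (intro_value), say whether each line is JSON (to_json), and supply a content_extractor that pulls the useful string out of each parsed object. As a rough standalone illustration of what that pipeline has to do for sonus.py's "data: {json}" lines — a minimal sketch, not webscout's implementation; the helper name parse_sse_json_lines is invented for this example:

import json
from typing import Callable, Iterable, Iterator, Optional

def parse_sse_json_lines(
    chunks: Iterable[bytes],
    intro_value: str = "data:",
    extractor: Callable[[dict], Optional[str]] = lambda d: d.get("content"),
) -> Iterator[str]:
    """Yield extracted content strings from a byte stream of 'data: {json}' lines."""
    buffer = ""
    for chunk in chunks:
        buffer += chunk.decode("utf-8", errors="ignore")
        # Process only complete lines; keep the unfinished tail buffered.
        while "\n" in buffer:
            line, buffer = buffer.split("\n", 1)
            line = line.strip()
            if line.startswith(intro_value):
                line = line[len(intro_value):].strip()
            if not line:
                continue
            try:
                piece = extractor(json.loads(line))
            except json.JSONDecodeError:
                continue  # mirrors yield_raw_on_error=False: drop unparsable lines
            if piece:
                yield piece

# Usage with a fake stream:
fake = [b'data: {"content": "Hel"}\n', b'data: {"content": "lo"}\nnoise\n']
print("".join(parse_sse_json_lines(fake)))  # -> "Hello"

The same skeleton covers the non-stream path in the diff, which simply calls splitlines() on the full response body and feeds the lines through the identical extractor.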
webscout/Provider/toolbaz.py
CHANGED

Summary: the same requests → curl_cffi migration, plus a new "o3-mini" model entry; [model:...] tag stripping moves into a sanitize_stream extractor, and get_auth() now raises FailedToGenerateResponseError on failure instead of returning None. Removed lines whose content was not preserved in this extract appear as "…".

@@ -1,5 +1,6 @@
 import re
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import uuid
 import base64
 import json
@@ -9,11 +10,11 @@ import time
 from datetime import datetime
 from typing import Any, Dict, Optional, Generator, Union, List

+from webscout import exceptions
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
-from webscout.AIbase import Provider
-from webscout import exceptions
+from webscout.AIutel import AwesomePrompts, sanitize_stream  # Import sanitize_stream
+from webscout.AIbase import Provider

 class Toolbaz(Provider):
     """
@@ -25,6 +26,7 @@ class Toolbaz(Provider):
         "gemini-2.0-flash-thinking",
         "gemini-2.0-flash",
         "gemini-1.5-flash",
+        "o3-mini",
         "gpt-4o-latest",
         "gpt-4o",
         "deepseek-r1",
@@ -47,7 +49,7 @@ class Toolbaz(Provider):
     def __init__(
         self,
         is_conversation: bool = True,
-        max_tokens: int = 600,
+        max_tokens: int = 600,  # Note: max_tokens is not directly used by the API
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -56,7 +58,7 @@ class Toolbaz(Provider):
         history_offset: int = 10250,
         act: str = None,
         model: str = "gemini-2.0-flash",
-        system_prompt: str = "You are a helpful AI assistant."
+        system_prompt: str = "You are a helpful AI assistant."  # Note: system_prompt is not directly used by the API
     ):
         """
         Initializes the Toolbaz API with given parameters.
@@ -64,28 +66,31 @@ class Toolbaz(Provider):
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

-        … (old session setup; not preserved in this extract)
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.timeout = timeout
         self.last_response = {}
         self.system_prompt = system_prompt
         self.model = model
-        self.proxies = proxies
+        self.proxies = proxies  # Store proxies for later use in requests

-        # Set up headers
+        # Set up headers for the curl_cffi session
         self.session.headers.update({
-            "user-agent": "Mozilla/5.0 (Linux; Android 10)",
+            "user-agent": "Mozilla/5.0 (Linux; Android 10)",  # Keep specific user-agent
             "accept": "*/*",
             "accept-language": "en-US",
             "cache-control": "no-cache",
-            "connection": "keep-alive",
             "content-type": "application/x-www-form-urlencoded; charset=UTF-8",
             "origin": "https://toolbaz.com",
             "pragma": "no-cache",
             "referer": "https://toolbaz.com/",
             "sec-fetch-mode": "cors"
+            # Add sec-ch-ua headers if needed for impersonation consistency
         })
+        # Assign proxies directly to the session
+        self.session.proxies = proxies

         # Initialize conversation history
         self.__available_optimizers = (
@@ -107,6 +112,13 @@ class Toolbaz(Provider):
         )
         self.conversation.history_offset = history_offset

+    @staticmethod
+    def _toolbaz_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Removes [model:...] tags from a string chunk."""
+        if isinstance(chunk, str):
+            return re.sub(r"\[model:.*?\]", "", chunk)
+        return None
+
     def random_string(self, length):
         return ''.join(random.choices(string.ascii_letters + string.digits, k=length))

@@ -139,20 +151,34 @@ class Toolbaz(Provider):
                 "session_id": session_id,
                 "token": token
             }
-            …
-            resp.…
+            # Use curl_cffi session post WITHOUT impersonate for token request
+            resp = self.session.post(
+                "https://data.toolbaz.com/token.php",
+                data=data
+                # Removed impersonate="chrome110" for this specific request
+            )
+            resp.raise_for_status()  # Check for HTTP errors
             result = resp.json()
             if result.get("success"):
                 return {"token": result["token"], "session_id": session_id}
-            … (3 lines; not preserved in this extract)
+            # Raise error if success is not true
+            raise exceptions.FailedToGenerateResponseError(f"Authentication failed: API response indicates failure. Response: {result}")
+        except CurlError as e:  # Catch CurlError specifically
+            # Raise a specific error indicating CurlError during auth
+            raise exceptions.FailedToGenerateResponseError(f"Authentication failed due to network error (CurlError): {e}") from e
+        except json.JSONDecodeError as e:
+            # Raise error for JSON decoding issues
+            raise exceptions.FailedToGenerateResponseError(f"Authentication failed: Could not decode JSON response. Error: {e}. Response text: {getattr(resp, 'text', 'N/A')}") from e
+        except Exception as e:  # Catch other potential errors (like HTTPError from raise_for_status)
+            # Raise a specific error indicating a general failure during auth
+            err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+            raise exceptions.FailedToGenerateResponseError(f"Authentication failed due to an unexpected error ({type(e).__name__}): {e} - {err_text}") from e

     def ask(
         self,
         prompt: str,
         stream: bool = False,
-        raw: bool = False,  # Kept for compatibility
+        raw: bool = False,  # Kept for compatibility, but output is always dict/string
         optimizer: Optional[str] = None,
         conversationally: bool = False,
     ) -> Union[Dict[str, Any], Generator]:
@@ -166,9 +192,9 @@ class Toolbaz(Provider):
             conversation_prompt if conversationally else prompt
         )

-        … (old auth check, 3 lines; not preserved in this extract)
+        # get_auth now raises exceptions on failure
+        auth = self.get_auth()
+        # No need to check if auth is None, as an exception would have been raised

         data = {
             "text": conversation_prompt,
@@ -179,67 +205,55 @@ class Toolbaz(Provider):

         def for_stream():
             try:
+                # Use curl_cffi session post with impersonate for the main request
                 resp = self.session.post(
                     "https://data.toolbaz.com/writing.php",
                     data=data,
                     stream=True,
-                    … (2 lines; not preserved in this extract)
+                    timeout=self.timeout,
+                    impersonate="chrome110"  # Keep impersonate here
                 )
                 resp.raise_for_status()

-                buffer = ""
-                tag_start = "[model:"
                 streaming_text = ""

-                … (15 lines of the old tag-buffering loop; not preserved in this extract)
-                            yield {"text": buffer}
-                            buffer = ""
-                        else:
-                            if buffer[:last_tag]:
-                                streaming_text += buffer[:last_tag]
-                                yield {"text": buffer[:last_tag]}
-                                buffer = buffer[last_tag:]
-
-                # Remove any remaining [model: ...] tag in the buffer
-                buffer = re.sub(r"\[model:.*?\]", "", buffer)
-                if buffer:
-                    streaming_text += buffer
-                    yield {"text": buffer}
+                # Use sanitize_stream with the custom extractor
+                # It will decode bytes and yield processed string chunks
+                processed_stream = sanitize_stream(
+                    data=resp.iter_content(chunk_size=None),  # Pass byte iterator
+                    intro_value=None,  # No simple prefix
+                    to_json=False,  # Content is text
+                    content_extractor=self._toolbaz_extractor,  # Use the tag remover
+                    yield_raw_on_error=True  # Yield even if extractor somehow fails (though unlikely for regex)
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string with tags removed
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield {"text": content_chunk} if not raw else content_chunk

                 self.last_response = {"text": streaming_text}
                 self.conversation.update_chat_history(prompt, streaming_text)

-            except …
-                raise exceptions.…
-            except Exception as e:
-                raise exceptions.…
+            except CurlError as e:  # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Network error (CurlError): {str(e)}") from e
+            except Exception as e:  # Catch other exceptions
+                raise exceptions.FailedToGenerateResponseError(f"Unexpected error during stream: {str(e)}") from e

         def for_non_stream():
             try:
+                # Use curl_cffi session post with impersonate for the main request
                 resp = self.session.post(
                     "https://data.toolbaz.com/writing.php",
                     data=data,
-                    … (2 lines; not preserved in this extract)
+                    timeout=self.timeout,
+                    impersonate="chrome110"  # Keep impersonate here
                 )
                 resp.raise_for_status()

-                …
+                # Use response.text which is already decoded
+                text = resp.text
                 # Remove [model: ...] tags
                 text = re.sub(r"\[model:.*?\]", "", text)

@@ -248,9 +262,9 @@ class Toolbaz(Provider):

                 return self.last_response

-            except …
-                raise exceptions.FailedToGenerateResponseError(f"Network error: {str(e)}") from e
-            except Exception as e:
+            except CurlError as e:  # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Network error (CurlError): {str(e)}") from e
+            except Exception as e:  # Catch other exceptions
                 raise exceptions.FailedToGenerateResponseError(f"Unexpected error: {str(e)}") from e

         return for_stream() if stream else for_non_stream()
@@ -263,26 +277,28 @@ class Toolbaz(Provider):
         conversationally: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
         """Generates a response from the Toolbaz API."""
-        def …
-        … (1 line; not preserved in this extract)
+        def for_stream_chat():
+            # ask() yields dicts when raw=False
+            for response_dict in self.ask(
                 prompt,
                 stream=True,
+                raw=False,  # Ensure ask yields dicts
                 optimizer=optimizer,
                 conversationally=conversationally
             ):
-                yield self.get_message(…
+                yield self.get_message(response_dict)

-        def …
-        … (6 lines of the old non-stream helper; not preserved in this extract)
+        def for_non_stream_chat():
+            # ask() returns a dict when stream=False
+            response_dict = self.ask(
+                prompt,
+                stream=False,
+                optimizer=optimizer,
+                conversationally=conversationally,
             )
+            return self.get_message(response_dict)

-        return …
+        return for_stream_chat() if stream else for_non_stream_chat()

     def get_message(self, response: Dict[str, Any]) -> str:
         """Extract the message from the response.
@@ -298,23 +314,40 @@ class Toolbaz(Provider):

 # Example usage
 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
+    from rich import print  # Use rich print if available
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
     # Test the provider with different models
     for model in Toolbaz.AVAILABLE_MODELS:
         try:
             test_ai = Toolbaz(model=model, timeout=60)
-            … (1 line; not preserved in this extract)
+            # Test stream first
+            response_stream = test_ai.chat("Say 'Hello' in one word", stream=True)
             response_text = ""
-            … (1 line; not preserved in this extract)
+            # print(f"\r{model:<50} {'Streaming...':<10}", end="", flush=True)
+            for chunk in response_stream:
                 response_text += chunk
-                print(…
+                # Optional: print chunks for visual feedback
+                # print(chunk, end="", flush=True)

             if response_text and len(response_text.strip()) > 0:
                 status = "✓"
-                # … (old display-text logic; not preserved in this extract)
+                # Clean and truncate response
+                clean_text = response_text.strip()
+                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
             else:
-                status = "✗"
-                display_text = "Empty or invalid response"
+                status = "✗ (Stream)"
+                display_text = "Empty or invalid stream response"
             print(f"\r{model:<50} {status:<10} {display_text}")
+
+            # Optional: Add non-stream test if needed
+            # print(f"\r{model:<50} {'Non-Stream...':<10}", end="", flush=True)
+            # response_non_stream = test_ai.chat("Say 'Hi' again", stream=False)
+            # if not response_non_stream or len(response_non_stream.strip()) == 0:
+            #     print(f"\r{model:<50} {'✗ (Non-Stream)':<10} Empty non-stream response")
+
         except Exception as e:
-            … (old error print; not preserved in this extract)
+            # Print full error for debugging
+            print(f"\r{model:<50} {'✗':<10} Error: {str(e)}")