webscout 8.3.1-py3-none-any.whl → 8.3.3-py3-none-any.whl
This diff shows the changes between these publicly released package versions as they appear in their public registries. It is provided for informational purposes only.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIutel.py +180 -78
- webscout/Bing_search.py +417 -0
- webscout/Extra/gguf.py +706 -177
- webscout/Provider/AISEARCH/__init__.py +1 -0
- webscout/Provider/AISEARCH/genspark_search.py +7 -7
- webscout/Provider/AISEARCH/stellar_search.py +132 -0
- webscout/Provider/ExaChat.py +84 -58
- webscout/Provider/GeminiProxy.py +140 -0
- webscout/Provider/HeckAI.py +85 -80
- webscout/Provider/Jadve.py +56 -50
- webscout/Provider/MCPCore.py +78 -75
- webscout/Provider/MiniMax.py +207 -0
- webscout/Provider/Nemotron.py +41 -13
- webscout/Provider/Netwrck.py +34 -51
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -4
- webscout/Provider/OPENAI/GeminiProxy.py +328 -0
- webscout/Provider/OPENAI/MiniMax.py +298 -0
- webscout/Provider/OPENAI/README.md +32 -29
- webscout/Provider/OPENAI/README_AUTOPROXY.md +238 -0
- webscout/Provider/OPENAI/TogetherAI.py +4 -17
- webscout/Provider/OPENAI/__init__.py +17 -1
- webscout/Provider/OPENAI/autoproxy.py +1067 -39
- webscout/Provider/OPENAI/base.py +17 -76
- webscout/Provider/OPENAI/deepinfra.py +42 -108
- webscout/Provider/OPENAI/e2b.py +0 -1
- webscout/Provider/OPENAI/flowith.py +179 -166
- webscout/Provider/OPENAI/friendli.py +233 -0
- webscout/Provider/OPENAI/mcpcore.py +109 -70
- webscout/Provider/OPENAI/monochat.py +329 -0
- webscout/Provider/OPENAI/pydantic_imports.py +1 -172
- webscout/Provider/OPENAI/scirachat.py +59 -51
- webscout/Provider/OPENAI/toolbaz.py +3 -9
- webscout/Provider/OPENAI/typegpt.py +1 -1
- webscout/Provider/OPENAI/utils.py +19 -42
- webscout/Provider/OPENAI/x0gpt.py +14 -2
- webscout/Provider/OPENAI/xenai.py +514 -0
- webscout/Provider/OPENAI/yep.py +8 -2
- webscout/Provider/OpenGPT.py +54 -32
- webscout/Provider/PI.py +58 -84
- webscout/Provider/StandardInput.py +32 -13
- webscout/Provider/TTI/README.md +9 -9
- webscout/Provider/TTI/__init__.py +3 -1
- webscout/Provider/TTI/aiarta.py +92 -78
- webscout/Provider/TTI/bing.py +231 -0
- webscout/Provider/TTI/infip.py +212 -0
- webscout/Provider/TTI/monochat.py +220 -0
- webscout/Provider/TTS/speechma.py +45 -39
- webscout/Provider/TeachAnything.py +11 -3
- webscout/Provider/TextPollinationsAI.py +78 -70
- webscout/Provider/TogetherAI.py +350 -0
- webscout/Provider/Venice.py +37 -46
- webscout/Provider/VercelAI.py +27 -24
- webscout/Provider/WiseCat.py +35 -35
- webscout/Provider/WrDoChat.py +22 -26
- webscout/Provider/WritingMate.py +26 -22
- webscout/Provider/XenAI.py +324 -0
- webscout/Provider/__init__.py +10 -5
- webscout/Provider/deepseek_assistant.py +378 -0
- webscout/Provider/granite.py +48 -57
- webscout/Provider/koala.py +51 -39
- webscout/Provider/learnfastai.py +49 -64
- webscout/Provider/llmchat.py +79 -93
- webscout/Provider/llmchatco.py +63 -78
- webscout/Provider/multichat.py +51 -40
- webscout/Provider/oivscode.py +1 -1
- webscout/Provider/scira_chat.py +159 -96
- webscout/Provider/scnet.py +13 -13
- webscout/Provider/searchchat.py +13 -13
- webscout/Provider/sonus.py +12 -11
- webscout/Provider/toolbaz.py +25 -8
- webscout/Provider/turboseek.py +41 -42
- webscout/Provider/typefully.py +27 -12
- webscout/Provider/typegpt.py +41 -46
- webscout/Provider/uncovr.py +55 -90
- webscout/Provider/x0gpt.py +33 -17
- webscout/Provider/yep.py +79 -96
- webscout/auth/__init__.py +55 -0
- webscout/auth/api_key_manager.py +189 -0
- webscout/auth/auth_system.py +100 -0
- webscout/auth/config.py +76 -0
- webscout/auth/database.py +400 -0
- webscout/auth/exceptions.py +67 -0
- webscout/auth/middleware.py +248 -0
- webscout/auth/models.py +130 -0
- webscout/auth/providers.py +279 -0
- webscout/auth/rate_limiter.py +254 -0
- webscout/auth/request_models.py +127 -0
- webscout/auth/request_processing.py +226 -0
- webscout/auth/routes.py +550 -0
- webscout/auth/schemas.py +103 -0
- webscout/auth/server.py +367 -0
- webscout/client.py +121 -70
- webscout/litagent/Readme.md +68 -55
- webscout/litagent/agent.py +99 -9
- webscout/scout/core/scout.py +104 -26
- webscout/scout/element.py +139 -18
- webscout/swiftcli/core/cli.py +14 -3
- webscout/swiftcli/decorators/output.py +59 -9
- webscout/update_checker.py +31 -49
- webscout/version.py +1 -1
- webscout/webscout_search.py +4 -12
- webscout/webscout_search_async.py +3 -10
- webscout/yep_search.py +2 -11
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/METADATA +141 -99
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/RECORD +109 -83
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/entry_points.txt +1 -1
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/OPENAI/api.py +0 -1320
- webscout/Provider/TTI/fastflux.py +0 -233
- webscout/Provider/Writecream.py +0 -246
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/WHEEL +0 -0
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/top_level.txt +0 -0
webscout/Provider/scnet.py
CHANGED

@@ -185,24 +185,24 @@ class SCNet(Provider):
         stream: bool = False,
         optimizer: Optional[str] = None,
         conversationally: bool = False,
+        raw: bool = False, # Added raw parameter
     ) -> Union[str, Generator[str, None, None]]:
         def for_stream_chat():
-            [8 removed lines not shown]
+            for response in self.ask(
+                prompt, stream=True, raw=raw, optimizer=optimizer, conversationally=conversationally
+            ):
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
         def for_non_stream_chat():
-            # ask() returns dict or str when not streaming
             response_data = self.ask(
-                prompt, stream=False, raw=
-                optimizer=optimizer, conversationally=conversationally
+                prompt, stream=False, raw=raw, optimizer=optimizer, conversationally=conversationally
             )
-            [2 removed lines not shown]
+            if raw:
+                return response_data if isinstance(response_data, str) else self.get_message(response_data)
+            else:
+                return self.get_message(response_data)
         return for_stream_chat() if stream else for_non_stream_chat()

     def get_message(self, response: dict) -> str:
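Every per-provider hunk below follows the pattern this SCNet change establishes: chat() gains a raw: bool = False parameter, threads it through to ask(), and yields either extracted message text (the default) or the unprocessed chunks (raw=True). A minimal usage sketch, assuming SCNet is importable from webscout.Provider and constructible without arguments:

    from webscout.Provider import SCNet  # import path assumed

    ai = SCNet()

    # Default: chat() runs each chunk through get_message() and yields plain text.
    for text in ai.chat("hello", stream=True):
        print(text, end="", flush=True)

    # raw=True: chunks from ask() are passed through unprocessed, so the caller
    # sees the provider's native stream output instead of extracted text.
    for chunk in ai.chat("hello", stream=True, raw=True):
        print(chunk, end="", flush=True)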
webscout/Provider/searchchat.py
CHANGED

@@ -232,6 +232,7 @@ class SearchChatAI(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
+        raw: bool = False, # Added raw parameter
     ) -> Union[str, Generator[str, None, None]]:
         """
         Chat with the API.
@@ -246,22 +247,21 @@
             Either a string response or a generator for streaming
         """
         def for_stream_chat():
-            [8 removed lines not shown]
+            for response in self.ask(
+                prompt, stream=True, raw=raw, optimizer=optimizer, conversationally=conversationally
+            ):
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
         def for_non_stream_chat():
-            # ask() returns dict or str when not streaming
             response_data = self.ask(
-                prompt, stream=False, raw=
-                optimizer=optimizer, conversationally=conversationally
+                prompt, stream=False, raw=raw, optimizer=optimizer, conversationally=conversationally
             )
-            [2 removed lines not shown]
+            if raw:
+                return response_data if isinstance(response_data, str) else self.get_message(response_data)
+            else:
+                return self.get_message(response_data)
         return for_stream_chat() if stream else for_non_stream_chat()

     def get_message(self, response: dict) -> str:
webscout/Provider/sonus.py
CHANGED

@@ -208,23 +208,24 @@ class SonusAI(Provider):
         optimizer: str = None,
         conversationally: bool = False,
         reasoning: bool = False,
+        raw: bool = False, # Added raw parameter
     ) -> Union[str, Generator[str, None, None]]:
         def for_stream_chat():
-            [2 removed lines not shown]
-                prompt, stream=True, raw=False, # Ensure ask yields dicts
-                optimizer=optimizer, conversationally=conversationally, reasoning=reasoning
+            for response in self.ask(
+                prompt, stream=True, raw=raw, optimizer=optimizer, conversationally=conversationally, reasoning=reasoning
             ):
-                [2 removed lines not shown]
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
         def for_non_stream_chat():
-            # ask() returns dict or str when raw=False/True
             response_data = self.ask(
-                prompt, stream=False, raw=
-                optimizer=optimizer, conversationally=conversationally, reasoning=reasoning
+                prompt, stream=False, raw=raw, optimizer=optimizer, conversationally=conversationally, reasoning=reasoning
             )
-            [2 removed lines not shown]
+            if raw:
+                return response_data if isinstance(response_data, str) else self.get_message(response_data)
+            else:
+                return self.get_message(response_data)
         return for_stream_chat() if stream else for_non_stream_chat()

     def get_message(self, response: dict) -> str:
webscout/Provider/toolbaz.py
CHANGED

@@ -24,6 +24,7 @@ class Toolbaz(Provider):
     AVAILABLE_MODELS = [
         "gemini-2.5-flash",
         "gemini-2.0-flash-thinking",
+        "sonar",
         "gemini-2.0-flash",
         "gemini-1.5-flash",
         "o3-mini",
@@ -223,14 +224,22 @@
                 intro_value=None, # No simple prefix
                 to_json=False, # Content is text
                 content_extractor=self._toolbaz_extractor, # Use the tag remover
-                yield_raw_on_error=True # Yield even if extractor somehow fails (though unlikely for regex)
+                yield_raw_on_error=True, # Yield even if extractor somehow fails (though unlikely for regex)
+                raw=raw
             )

             for content_chunk in processed_stream:
                 # content_chunk is the string with tags removed
-                if
-                [2 removed lines not shown]
+                if isinstance(content_chunk, bytes):
+                    content_chunk = content_chunk.decode('utf-8', errors='ignore')
+                if content_chunk is None:
+                    continue
+                if raw:
+                    yield content_chunk
+                else:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield {"text": content_chunk}

             self.last_response = {"text": streaming_text}
             self.conversation.update_chat_history(prompt, streaming_text)
@@ -274,28 +283,36 @@
         stream: bool = False,
         optimizer: Optional[str] = None,
         conversationally: bool = False,
+        raw: bool = False, # Added raw parameter
     ) -> Union[str, Generator[str, None, None]]:
         """Generates a response from the Toolbaz API."""
         def for_stream_chat():
             # ask() yields dicts when raw=False
-            for
+            for response in self.ask(
                 prompt,
                 stream=True,
-                raw=
+                raw=raw,
                 optimizer=optimizer,
                 conversationally=conversationally
             ):
-                [1 removed line not shown]
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)

         def for_non_stream_chat():
             # ask() returns a dict when stream=False
             response_dict = self.ask(
                 prompt,
                 stream=False,
+                raw=raw,
                 optimizer=optimizer,
                 conversationally=conversationally,
             )
-            [1 removed line not shown]
+            if raw:
+                return response_dict
+            else:
+                return self.get_message(response_dict)

         return for_stream_chat() if stream else for_non_stream_chat()

webscout/Provider/turboseek.py
CHANGED

@@ -136,65 +136,59 @@ class TurboSeek(Provider):
         }

         def for_stream():
-            try:
-                # Use curl_cffi session post with impersonate
+            try:
                 response = self.session.post(
                     self.chat_endpoint,
                     json=payload,
                     stream=True,
                     timeout=self.timeout,
-                    impersonate="chrome120"
+                    impersonate="chrome120"
                 )
                 if not response.ok:
                     raise exceptions.FailedToGenerateResponseError(
                         f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                     )
-
                 streaming_text = ""
-                # Use sanitize_stream with the custom extractor
                 processed_stream = sanitize_stream(
-                    data=response.iter_content(chunk_size=None),
+                    data=response.iter_content(chunk_size=None),
                     intro_value="data:",
-                    to_json=True,
-                    content_extractor=self._turboseek_extractor,
-                    yield_raw_on_error=False
+                    to_json=True,
+                    content_extractor=self._turboseek_extractor,
+                    yield_raw_on_error=False,
+                    raw=raw
                 )
-
                 for content_chunk in processed_stream:
-                    [8 removed lines not shown]
+                    if isinstance(content_chunk, bytes):
+                        content_chunk = content_chunk.decode('utf-8', errors='ignore')
+                    if content_chunk is None:
+                        continue
+                    if raw:
+                        yield content_chunk
+                    else:
+                        if content_chunk and isinstance(content_chunk, str):
+                            streaming_text += content_chunk
+                            self.last_response.update(dict(text=streaming_text))
+                            yield dict(text=content_chunk)
+                if streaming_text:
                     self.conversation.update_chat_history(
-                        prompt, streaming_text
+                        prompt, streaming_text
                     )
-            except CurlError as e:
+            except CurlError as e:
                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
-            except Exception as e:
+            except Exception as e:
                 raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
-
-
         def for_non_stream():
-            # Aggregate the stream using the updated for_stream logic
             full_text = ""
             try:
-                # Ensure raw=False so for_stream yields dicts
                 for chunk_data in for_stream():
                     if isinstance(chunk_data, dict) and "text" in chunk_data:
                         full_text += chunk_data["text"]
-                    elif isinstance(chunk_data, str):
+                    elif isinstance(chunk_data, str):
                         full_text += chunk_data
             except Exception as e:
                 raise exceptions.FailedToGenerateResponseError(f"Failed to aggregate non-stream response: {e}") from e
-
-            # Ensure last_response reflects the complete aggregated text
-            self.last_response = {"text": full_text}
+            self.last_response = {"text": full_text}
             return self.last_response
-
         return for_stream() if stream else for_non_stream()

     def chat(
@@ -203,6 +197,7 @@ class TurboSeek(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
+        raw: bool = False, # Added raw parameter
     ) -> str:
         """Generate response `str`
         Args:
@@ -216,20 +211,24 @@

         def for_stream():
             for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
+                prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally
             ):
-                [2 removed lines not shown]
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
         def for_non_stream():
-            [6 removed lines not shown]
-            )
+            result = self.ask(
+                prompt,
+                False,
+                raw=raw,
+                optimizer=optimizer,
+                conversationally=conversationally,
             )
-            [1 removed line not shown]
+            if raw:
+                return result
+            else:
+                return self.get_message(result)
         return for_stream() if stream else for_non_stream()

     def get_message(self, response: dict) -> str:
@@ -251,7 +250,7 @@ if __name__ == '__main__':
     try: # Add try-except block for testing
         ai = TurboSeek(timeout=60)
         print("[bold blue]Testing Stream:[/bold blue]")
-        response_stream = ai.chat("yooooooooooo", stream=True)
+        response_stream = ai.chat("yooooooooooo", stream=True, raw=False)
         for chunk in response_stream:
             print(chunk, end="", flush=True)
         # Optional: Test non-stream
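TurboSeek's for_non_stream() above also shows how the non-stream path now tolerates both chunk shapes the stream can emit. Restated in isolation (a sketch of the aggregation loop from the hunk above, nothing more):

    def aggregate(chunks):
        # Fold stream output into one string, accepting both the wrapped
        # dict form ({"text": ...}) and plain string chunks.
        full_text = ""
        for chunk_data in chunks:
            if isinstance(chunk_data, dict) and "text" in chunk_data:
                full_text += chunk_data["text"]
            elif isinstance(chunk_data, str):
                full_text += chunk_data
        return full_text

    assert aggregate([{"text": "Hel"}, "lo"]) == "Hello"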
webscout/Provider/typefully.py
CHANGED

@@ -126,11 +126,19 @@ class TypefullyAI(Provider):
                 intro_value=None,
                 to_json=False,
                 content_extractor=self._typefully_extractor,
+                raw=raw
             )
             for content_chunk in processed_stream:
-                if
-                [2 removed lines not shown]
+                if isinstance(content_chunk, bytes):
+                    content_chunk = content_chunk.decode('utf-8', errors='ignore')
+                if content_chunk is None:
+                    continue
+                if raw:
+                    yield content_chunk
+                else:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield dict(text=content_chunk)
             self.last_response.update(dict(text=streaming_text))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
@@ -151,21 +159,28 @@ class TypefullyAI(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
+        raw: bool = False, # Added raw parameter
     ) -> str:
         def for_stream():
             for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
+                prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally
             ):
-                [1 removed line not shown]
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
         def for_non_stream():
-            [6 removed lines not shown]
-            )
+            result = self.ask(
+                prompt,
+                False,
+                raw=raw,
+                optimizer=optimizer,
+                conversationally=conversationally,
             )
+            if raw:
+                return result
+            else:
+                return self.get_message(result)
         return for_stream() if stream else for_non_stream()

     def get_message(self, response: dict) -> str:
webscout/Provider/typegpt.py
CHANGED

@@ -17,6 +17,7 @@ class TypeGPT(Provider):
     AVAILABLE_MODELS = [
         # Working Models (based on testing)
         # "gpt-4o-mini-2024-07-18",
+        "gpt-4o-mini",
        "chatgpt-4o-latest",
         "deepseek-r1",
         "deepseek-v3",
@@ -106,7 +107,6 @@
             raise exceptions.FailedToGenerateResponseError(
                 f"Optimizer is not one of {self.__available_optimizers}"
             )
-
         payload = {
             "messages": [
                 {"role": "system", "content": self.system_prompt},
@@ -120,10 +120,8 @@
             "top_p": self.top_p,
             "max_tokens": self.max_tokens_to_sample,
         }
-
         def for_stream():
             try:
-                # Use curl_cffi session post with impersonate
                 response = self.session.post(
                     self.api_endpoint,
                     headers=self.headers,
@@ -136,36 +134,33 @@
                 raise exceptions.FailedToGenerateResponseError(
                     f"Network connection failed (CurlError). Check your firewall or antivirus settings. Original error: {ce}"
                 ) from ce
-
             response.raise_for_status() # Check for HTTP errors first
-
             streaming_text = ""
-            # Use sanitize_stream
             processed_stream = sanitize_stream(
                 data=response.iter_content(chunk_size=None), # Pass byte iterator
                 intro_value="data:",
                 to_json=True, # Stream sends JSON
                 skip_markers=["[DONE]"],
                 content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('delta', {}).get('content') if isinstance(chunk, dict) else None,
-                yield_raw_on_error=False
+                yield_raw_on_error=False,
+                raw=raw
             )
-
             for content_chunk in processed_stream:
-                [9 removed lines not shown]
+                if isinstance(content_chunk, bytes):
+                    content_chunk = content_chunk.decode('utf-8', errors='ignore')
+                if content_chunk is None:
+                    continue
+                if raw:
+                    yield content_chunk
+                else:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield dict(text=content_chunk)
+            self.last_response = dict(text=streaming_text)
+            if streaming_text:
                 self.conversation.update_chat_history(prompt, streaming_text)
-
-
         def for_non_stream():
             try:
-                # Use curl_cffi session post with impersonate
                 response = self.session.post(
                     self.api_endpoint,
                     headers=self.headers,
@@ -177,34 +172,32 @@
                 raise exceptions.FailedToGenerateResponseError(
                     f"Network connection failed (CurlError). Check your firewall or antivirus settings. Original error: {ce}"
                 ) from ce
-
             response.raise_for_status() # Check for HTTP errors
-
             try:
                 response_text = response.text # Get raw text
-
-                # Use sanitize_stream for non-streaming JSON response
                 processed_stream = sanitize_stream(
                     data=response_text,
                     to_json=True, # Parse the whole text as JSON
                     intro_value=None,
-                    # Extractor for non-stream structure
                     content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('message', {}).get('content') if isinstance(chunk, dict) else None,
-                    yield_raw_on_error=False
+                    yield_raw_on_error=False,
+                    raw=raw
                 )
-
-                # Extract the single result
                 content = ""
                 for extracted_content in processed_stream:
-                    [3 removed lines not shown]
+                    if isinstance(extracted_content, bytes):
+                        extracted_content = extracted_content.decode('utf-8', errors='ignore')
+                    if extracted_content is None:
+                        continue
+                    if raw:
+                        content += extracted_content
+                    else:
+                        content = extracted_content if isinstance(extracted_content, str) else ""
+                self.last_response = {"text": content}
                 self.conversation.update_chat_history(prompt, content)
-                return self.last_response
-            except (json.JSONDecodeError, Exception) as je:
+                return self.last_response if not raw else content
+            except (json.JSONDecodeError, Exception) as je:
                 raise exceptions.FailedToGenerateResponseError(f"Failed to decode JSON response: {je} - Response text: {response.text}")
-
-
         return for_stream() if stream else for_non_stream()

     def chat(
@@ -213,25 +206,27 @@
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
+        raw: bool = False, # Added raw parameter
     ) -> Union[str, Generator[str, None, None]]:
-        """Generate response string or stream."""
         if stream:
-            # ask() yields dicts or strings when streaming
             gen = self.ask(
-                prompt, stream=True, raw=
+                prompt, stream=True, raw=raw, # Ensure ask yields dicts or raw
                 optimizer=optimizer, conversationally=conversationally
             )
-            for
-            [2 removed lines not shown]
+            for chunk in gen:
+                if raw:
+                    yield chunk
+                else:
+                    yield self.get_message(chunk)
         else:
-            [2 removed lines not shown]
-                prompt, stream=False,
+            response = self.ask(
+                prompt, stream=False, raw=raw,
                 optimizer=optimizer, conversationally=conversationally
             )
-            [2 removed lines not shown]
+            if raw:
+                return response
+            else:
+                return self.get_message(response)
     def get_message(self, response: Dict[str, Any]) -> str:
         """Retrieves message from response."""
         if isinstance(response, dict):
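The same chunk-handling loop recurs across toolbaz.py, turboseek.py, typefully.py, and typegpt.py: decode stray bytes, skip empty extractor results, then either pass chunks through untouched (raw=True) or wrap them in the dict form that get_message() consumes. A self-contained sketch of that shared pattern (illustrative names, not code from the package):

    from typing import Any, Dict, Generator, Iterable, Union

    def normalize_stream(
        chunks: Iterable[Any], raw: bool = False
    ) -> Generator[Union[str, Dict[str, str]], None, None]:
        streaming_text = ""
        for chunk in chunks:
            if isinstance(chunk, bytes):
                # Upstream may yield undecoded bytes once raw mode is enabled.
                chunk = chunk.decode("utf-8", errors="ignore")
            if chunk is None:
                continue  # extractor produced nothing for this event
            if raw:
                yield chunk  # pass through untouched
            elif chunk and isinstance(chunk, str):
                streaming_text += chunk
                yield {"text": chunk}  # wrapped form consumed by get_message()

    print(list(normalize_stream(["Hel", b"lo", None])))            # [{'text': 'Hel'}, {'text': 'lo'}]
    print(list(normalize_stream(["Hel", b"lo", None], raw=True)))  # ['Hel', 'lo']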
|