webscout 8.3.2__py3-none-any.whl → 8.3.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIutel.py +146 -37
- webscout/Bing_search.py +1 -2
- webscout/Provider/AISEARCH/__init__.py +1 -0
- webscout/Provider/AISEARCH/stellar_search.py +132 -0
- webscout/Provider/ExaChat.py +84 -58
- webscout/Provider/HeckAI.py +85 -80
- webscout/Provider/Jadve.py +56 -50
- webscout/Provider/MiniMax.py +207 -0
- webscout/Provider/Nemotron.py +41 -13
- webscout/Provider/Netwrck.py +34 -51
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1
- webscout/Provider/OPENAI/MiniMax.py +298 -0
- webscout/Provider/OPENAI/README.md +30 -29
- webscout/Provider/OPENAI/TogetherAI.py +4 -17
- webscout/Provider/OPENAI/__init__.py +3 -1
- webscout/Provider/OPENAI/autoproxy.py +752 -17
- webscout/Provider/OPENAI/base.py +7 -76
- webscout/Provider/OPENAI/deepinfra.py +42 -108
- webscout/Provider/OPENAI/flowith.py +179 -166
- webscout/Provider/OPENAI/friendli.py +233 -0
- webscout/Provider/OPENAI/monochat.py +329 -0
- webscout/Provider/OPENAI/pydantic_imports.py +1 -172
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/OPENAI/typegpt.py +1 -1
- webscout/Provider/OPENAI/utils.py +19 -42
- webscout/Provider/OPENAI/x0gpt.py +14 -2
- webscout/Provider/OpenGPT.py +54 -32
- webscout/Provider/PI.py +58 -84
- webscout/Provider/StandardInput.py +32 -13
- webscout/Provider/TTI/README.md +9 -9
- webscout/Provider/TTI/__init__.py +2 -1
- webscout/Provider/TTI/aiarta.py +92 -78
- webscout/Provider/TTI/infip.py +212 -0
- webscout/Provider/TTI/monochat.py +220 -0
- webscout/Provider/TeachAnything.py +11 -3
- webscout/Provider/TextPollinationsAI.py +78 -70
- webscout/Provider/TogetherAI.py +32 -48
- webscout/Provider/Venice.py +37 -46
- webscout/Provider/VercelAI.py +27 -24
- webscout/Provider/WiseCat.py +35 -35
- webscout/Provider/WrDoChat.py +22 -26
- webscout/Provider/WritingMate.py +26 -22
- webscout/Provider/__init__.py +2 -2
- webscout/Provider/granite.py +48 -57
- webscout/Provider/koala.py +51 -39
- webscout/Provider/learnfastai.py +49 -64
- webscout/Provider/llmchat.py +79 -93
- webscout/Provider/llmchatco.py +63 -78
- webscout/Provider/multichat.py +51 -40
- webscout/Provider/oivscode.py +1 -1
- webscout/Provider/scira_chat.py +159 -96
- webscout/Provider/scnet.py +13 -13
- webscout/Provider/searchchat.py +13 -13
- webscout/Provider/sonus.py +12 -11
- webscout/Provider/toolbaz.py +25 -8
- webscout/Provider/turboseek.py +41 -42
- webscout/Provider/typefully.py +27 -12
- webscout/Provider/typegpt.py +41 -46
- webscout/Provider/uncovr.py +55 -90
- webscout/Provider/x0gpt.py +33 -17
- webscout/Provider/yep.py +79 -96
- webscout/auth/__init__.py +12 -1
- webscout/auth/providers.py +27 -5
- webscout/auth/routes.py +128 -104
- webscout/auth/server.py +367 -312
- webscout/client.py +121 -116
- webscout/litagent/Readme.md +68 -55
- webscout/litagent/agent.py +99 -9
- webscout/version.py +1 -1
- {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/METADATA +102 -90
- {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/RECORD +75 -87
- webscout/Provider/TTI/fastflux.py +0 -233
- webscout/Provider/Writecream.py +0 -246
- webscout/auth/static/favicon.svg +0 -11
- webscout/auth/swagger_ui.py +0 -203
- webscout/auth/templates/components/authentication.html +0 -237
- webscout/auth/templates/components/base.html +0 -103
- webscout/auth/templates/components/endpoints.html +0 -750
- webscout/auth/templates/components/examples.html +0 -491
- webscout/auth/templates/components/footer.html +0 -75
- webscout/auth/templates/components/header.html +0 -27
- webscout/auth/templates/components/models.html +0 -286
- webscout/auth/templates/components/navigation.html +0 -70
- webscout/auth/templates/static/api.js +0 -455
- webscout/auth/templates/static/icons.js +0 -168
- webscout/auth/templates/static/main.js +0 -784
- webscout/auth/templates/static/particles.js +0 -201
- webscout/auth/templates/static/styles.css +0 -3353
- webscout/auth/templates/static/ui.js +0 -374
- webscout/auth/templates/swagger_ui.html +0 -170
- {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/WHEEL +0 -0
- {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/top_level.txt +0 -0
webscout/Provider/TeachAnything.py
CHANGED

@@ -163,6 +163,7 @@ class TeachAnything(Provider):
         stream: bool = False,  # Keep stream param for interface consistency
         optimizer: str = None,
         conversationally: bool = False,
+        raw: bool = False,  # Added raw parameter
     ) -> Union[str, Any]:
         """Generate response `str` or yield for streaming compatibility
 
         Args:
@@ -170,22 +171,29 @@ class TeachAnything(Provider):
             stream (bool, optional): Flag for streaming response. Defaults to False.
             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+            raw (bool, optional): If True, return raw string output.
         Returns:
             str or generator: Response generated
         """
         response_data = self.ask(
             prompt,
             stream=False,  # Call ask in non-stream mode internally
-            raw=
+            raw=raw,  # Pass raw flag
             optimizer=optimizer,
             conversationally=conversationally
         )
         if stream:
             def stream_wrapper():
-                yield self.get_message(response_data)
+                if raw:
+                    yield response_data if isinstance(response_data, str) else self.get_message(response_data)
+                else:
+                    yield self.get_message(response_data)
             return stream_wrapper()
         else:
-            return self.get_message(response_data)
+            if raw:
+                return response_data if isinstance(response_data, str) else self.get_message(response_data)
+            else:
+                return self.get_message(response_data)
 
     def get_message(self, response: Union[dict, str]) -> str:
         """Retrieves message only from response
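This TeachAnything hunk shows the pattern repeated across most providers in this release: a new `raw: bool = False` parameter threaded through `chat()` into `ask()`, with a branch at each yield/return deciding between the raw payload and `get_message()` post-processing. A minimal usage sketch, assuming `TeachAnything` can be constructed with defaults and that the module path matches the file list above; the prompt is illustrative:

```python
# Illustrative only; constructor arguments and network behavior not verified.
from webscout.Provider.TeachAnything import TeachAnything

ai = TeachAnything()

# Default path: the response is resolved through get_message() and
# chat() returns a plain string.
print(ai.chat("Explain recursion in one sentence."))

# New in 8.3.3: raw=True short-circuits get_message() when the
# underlying response is already a string.
print(ai.chat("Explain recursion in one sentence.", raw=True))
```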
webscout/Provider/TextPollinationsAI.py
CHANGED

@@ -135,95 +135,98 @@ class TextPollinationsAI(Provider):
             payload["tool_choice"] = tool_choice
 
         def for_stream():
-            try:
-                # Use curl_cffi session post with impersonate
+            try:
                 response = self.session.post(
                     self.api_endpoint,
-                    # headers are set on the session
                     json=payload,
                     stream=True,
                     timeout=self.timeout,
-                    impersonate="chrome120"
+                    impersonate="chrome120"
                 )
-
                 if not response.ok:
                     raise exceptions.FailedToGenerateResponseError(
                         f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                     )
-
                 streaming_text = ""
-                # Use sanitize_stream
                 processed_stream = sanitize_stream(
-                    data=response.iter_content(chunk_size=None),
+                    data=response.iter_content(chunk_size=None),
                     intro_value="data:",
-                    to_json=True,
+                    to_json=True,
                     skip_markers=["[DONE]"],
-                    # Extractor handles both content and tool_calls
                     content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('delta') if isinstance(chunk, dict) else None,
-                    yield_raw_on_error=False
+                    yield_raw_on_error=False,
+                    raw=raw
                 )
-
                 for delta in processed_stream:
-
-
-
-
-
-
-
-
-
-
-
-
-
+                    if isinstance(delta, bytes):
+                        delta = delta.decode('utf-8', errors='ignore')
+                    if delta is None:
+                        continue
+                    if raw:
+                        # Only yield content or tool_calls as string
+                        if isinstance(delta, dict):
+                            if 'content' in delta and delta['content'] is not None:
+                                content = delta['content']
+                                streaming_text += content
+                                yield content
+                            elif 'tool_calls' in delta:
+                                tool_calls = delta['tool_calls']
+                                yield json.dumps(tool_calls)
+                        elif isinstance(delta, str):
+                            streaming_text += delta
+                            yield delta
+                    else:
+                        if isinstance(delta, dict):
+                            if 'content' in delta and delta['content'] is not None:
+                                content = delta['content']
+                                streaming_text += content
+                                yield dict(text=content)
+                            elif 'tool_calls' in delta:
+                                tool_calls = delta['tool_calls']
+                                yield dict(tool_calls=tool_calls)
+                self.last_response.update(dict(text=streaming_text))
+                if streaming_text:
                     self.conversation.update_chat_history(
-                        prompt, streaming_text
+                        prompt, streaming_text
                     )
-            except CurlError as e:
+            except CurlError as e:
                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
-            except Exception as e:
+            except Exception as e:
                 raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}") from e
-
-
         def for_non_stream():
-            # Aggregate the stream using the updated for_stream logic
             final_content = ""
-            tool_calls_aggregated = None
-            try:
+            tool_calls_aggregated = None
+            try:
                 for chunk_data in for_stream():
-                    if
-                    if
-                        final_content += chunk_data
-                    elif
-
+                    if raw:
+                        if isinstance(chunk_data, str):
+                            final_content += chunk_data
+                        elif isinstance(chunk_data, bytes):
+                            final_content += chunk_data.decode('utf-8', errors='ignore')
+                    elif isinstance(chunk_data, list):
                         if tool_calls_aggregated is None:
                             tool_calls_aggregated = []
-                        tool_calls_aggregated.extend(chunk_data
-
-
-
-
-
-
-
+                        tool_calls_aggregated.extend(chunk_data)
+                    else:
+                        if isinstance(chunk_data, dict):
+                            if "text" in chunk_data:
+                                final_content += chunk_data["text"]
+                            elif "tool_calls" in chunk_data:
+                                if tool_calls_aggregated is None:
+                                    tool_calls_aggregated = []
+                                tool_calls_aggregated.extend(chunk_data["tool_calls"])
+                        elif isinstance(chunk_data, str):
+                            final_content += chunk_data
             except Exception as e:
-
-
-                raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
-
-
-                # last_response and history are updated within for_stream (for text)
-                # Return a dict containing text and/or tool_calls
+                if not final_content and not tool_calls_aggregated:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
             result = {}
             if final_content:
                 result["text"] = final_content
             if tool_calls_aggregated:
                 result["tool_calls"] = tool_calls_aggregated
-            self.last_response = result
-            return self.last_response
-
-
+            self.last_response = result
+            return self.last_response if not raw else (final_content if final_content else json.dumps(tool_calls_aggregated) if tool_calls_aggregated else "")
         return for_stream() if stream else for_non_stream()
 
     def chat(
@@ -234,27 +237,32 @@ class TextPollinationsAI(Provider):
         conversationally: bool = False,
         tools: Optional[List[Dict[str, Any]]] = None,
         tool_choice: Optional[Dict[str, Any]] = None,
+        raw: bool = False,  # Added raw parameter
     ) -> Union[str, Generator[str, None, None]]:
         """Generate response as a string"""
         def for_stream():
             for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally,
+                prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally,
                 tools=tools, tool_choice=tool_choice
             ):
-
-
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
         def for_non_stream():
-
-
-
-
-
-
-
-
-                )
+            result = self.ask(
+                prompt,
+                False,
+                raw=raw,
+                optimizer=optimizer,
+                conversationally=conversationally,
+                tools=tools,
+                tool_choice=tool_choice,
             )
-
+            if raw:
+                return result if isinstance(result, str) else (result.get("text", "") if isinstance(result, dict) else str(result))
+            else:
+                return self.get_message(result)
         return for_stream() if stream else for_non_stream()
 
     def get_message(self, response: dict) -> str:
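After this rewrite, `ask()` in streaming mode yields chunks in one of two shapes: with `raw=False` they are dicts (`{"text": ...}` or `{"tool_calls": ...}`), and with `raw=True` they are plain strings, tool-call deltas arriving JSON-encoded via `json.dumps`. A consumer sketch under those assumptions, with the stream flag passed positionally exactly as `chat()` does in the hunk above; module path taken from the file list:

```python
# Illustrative consumer of the two chunk shapes; not from the package docs.
from webscout.Provider.TextPollinationsAI import TextPollinationsAI

ai = TextPollinationsAI()

# raw=False (default): dict chunks, mirroring what ask() stores
# in self.last_response.
for chunk in ai.ask("List three prime numbers.", True):
    if isinstance(chunk, dict) and "text" in chunk:
        print(chunk["text"], end="", flush=True)

# raw=True: string chunks; a tool-call delta would arrive as one
# JSON-encoded string (the json.dumps(tool_calls) branch above).
for chunk in ai.ask("List three prime numbers.", True, raw=True):
    print(chunk, end="", flush=True)
```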
webscout/Provider/TogetherAI.py
CHANGED

@@ -15,21 +15,15 @@ class TogetherAI(Provider):
     """
 
     AVAILABLE_MODELS = [
-        "Gryphe/MythoMax-L2-13b",
-        "Gryphe/MythoMax-L2-13b-Lite",
         "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
         "Qwen/QwQ-32B",
         "Qwen/Qwen2-72B-Instruct",
         "Qwen/Qwen2-VL-72B-Instruct",
         "Qwen/Qwen2.5-72B-Instruct-Turbo",
         "Qwen/Qwen2.5-7B-Instruct-Turbo",
-        "Qwen/Qwen2.5-Coder-32B-Instruct",
         "Qwen/Qwen2.5-VL-72B-Instruct",
-        "Qwen/Qwen3-235B-A22B-fp8",
         "Qwen/Qwen3-235B-A22B-fp8-tput",
-        "
-        "Rrrr/meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo-03dc18e1",
-        "Rrrr/meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo-6c92f39d",
+        "Salesforce/Llama-Rank-V1",
         "arcee-ai/arcee-blitz",
         "arcee-ai/caller",
         "arcee-ai/coder-large",
@@ -44,13 +38,12 @@ class TogetherAI(Provider):
         "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
         "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
         "deepseek-ai/DeepSeek-V3",
-        "deepseek-ai/DeepSeek-V3-p-dp",
         "google/gemma-2-27b-it",
-        "google/gemma-2b-it",
         "lgai/exaone-3-5-32b-instruct",
         "lgai/exaone-deep-32b",
         "marin-community/marin-8b-instruct",
-        "meta-llama
+        "meta-llama-llama-2-70b-hf",
+        "meta-llama/Llama-2-70b-hf",
         "meta-llama/Llama-3-8b-chat-hf",
         "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
         "meta-llama/Llama-3.2-3B-Instruct-Turbo",
@@ -72,14 +65,8 @@ class TogetherAI(Provider):
         "mistralai/Mixtral-8x7B-Instruct-v0.1",
         "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
         "perplexity-ai/r1-1776",
-        "roberizk@gmail.com/meta-llama/Llama-3-70b-chat-hf-26ee936b",
-        "roberizk@gmail.com/meta-llama/Meta-Llama-3-70B-Instruct-6feb41f7",
-        "roberizk@gmail.com/meta-llama/Meta-Llama-3-8B-Instruct-8ced8839",
         "scb10x/scb10x-llama3-1-typhoon2-70b-instruct",
-        "scb10x/scb10x-
-        "togethercomputer/MoA-1",
-        "togethercomputer/MoA-1-Turbo",
-        "togethercomputer/Refuel-Llm-V2",
+        "scb10x/scb10x-typhoon-2-1-gemma3-12b",
         "togethercomputer/Refuel-Llm-V2-Small",
     ]
 
@@ -220,14 +207,10 @@ class TogetherAI(Provider):
             )
         else:
             raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
-        # Get API key if not already set
         if not self.headers.get("Authorization"):
             api_key = self.get_activation_key()
             self.headers["Authorization"] = f"Bearer {api_key}"
             self.session.headers.update(self.headers)
-
-        # Payload construction
         payload = {
             "model": self.model,
             "messages": [
@@ -236,7 +219,6 @@ class TogetherAI(Provider):
             ],
             "stream": stream
         }
-
         def for_stream():
             streaming_text = ""
             try:
@@ -248,23 +230,27 @@ class TogetherAI(Provider):
                     impersonate="chrome110"
                 )
                 response.raise_for_status()
-
-                # Use sanitize_stream
                 processed_stream = sanitize_stream(
                     data=response.iter_content(chunk_size=None),
                     intro_value="data:",
                     to_json=True,
                     skip_markers=["[DONE]"],
                     content_extractor=self._togetherai_extractor,
-                    yield_raw_on_error=False
+                    yield_raw_on_error=False,
+                    raw=raw
                 )
-
                 for content_chunk in processed_stream:
-                    if
-
-
-
-
+                    if isinstance(content_chunk, bytes):
+                        content_chunk = content_chunk.decode('utf-8', errors='ignore')
+                    if content_chunk is None:
+                        continue
+                    if raw:
+                        yield content_chunk
+                    else:
+                        if content_chunk and isinstance(content_chunk, str):
+                            streaming_text += content_chunk
+                            resp = dict(text=content_chunk)
+                            yield resp
             except CurlError as e:
                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
             except Exception as e:
@@ -273,7 +259,6 @@ class TogetherAI(Provider):
             if streaming_text:
                 self.last_response = {"text": streaming_text}
                 self.conversation.update_chat_history(prompt, streaming_text)
-
         def for_non_stream():
             try:
                 response = self.session.post(
@@ -283,30 +268,25 @@ class TogetherAI(Provider):
                     impersonate="chrome110"
                 )
                 response.raise_for_status()
-
                 response_text = response.text
-
-                # Use sanitize_stream to parse the non-streaming JSON response
                 processed_stream = sanitize_stream(
                     data=response_text,
                     to_json=True,
                     intro_value=None,
                     content_extractor=lambda chunk: chunk.get("choices", [{}])[0].get("message", {}).get("content") if isinstance(chunk, dict) else None,
-                    yield_raw_on_error=False
+                    yield_raw_on_error=False,
+                    raw=raw
                 )
-                content = next(processed_stream, None)
+                content = next((c for c in processed_stream if c is not None), None)
                 content = content if isinstance(content, str) else ""
-
                 self.last_response = {"text": content}
                 self.conversation.update_chat_history(prompt, content)
                 return self.last_response if not raw else content
-
             except CurlError as e:
                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
             except Exception as e:
                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                 raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e} - {err_text}") from e
-
         return for_stream() if stream else for_non_stream()
 
     def chat(
@@ -315,23 +295,27 @@ class TogetherAI(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
+        raw: bool = False,  # Added raw parameter
     ) -> Union[str, Generator[str, None, None]]:
-        """Generate response `str`"""
         def for_stream_chat():
             gen = self.ask(
-                prompt, stream=True, raw=
+                prompt, stream=True, raw=raw,
                 optimizer=optimizer, conversationally=conversationally
             )
-            for
-
-
+            for response in gen:
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
         def for_non_stream_chat():
             response_data = self.ask(
-                prompt, stream=False, raw=
+                prompt, stream=False, raw=raw,
                 optimizer=optimizer, conversationally=conversationally
            )
-
-
+            if raw:
+                return response_data
+            else:
+                return self.get_message(response_data)
        return for_stream_chat() if stream else for_non_stream_chat()
 
    def get_message(self, response: dict) -> str:
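Both TogetherAI code paths now forward `raw` into `sanitize_stream`, webscout's shared SSE/JSON cleanup helper, which is defined elsewhere in the package and not shown in this diff. Judging from the call sites, `raw=True` appears to make it pass chunks through without JSON decoding or content extraction, which is why the loops above defensively decode bytes and skip `None`. A simplified stand-in, not the library's implementation, illustrating the parameters used here:

```python
import json
from typing import Any, Callable, Iterable, Iterator, Optional

def sanitize_stream_sketch(
    data: Iterable[bytes],
    intro_value: Optional[str] = "data:",
    to_json: bool = True,
    skip_markers: Iterable[str] = ("[DONE]",),
    content_extractor: Optional[Callable[[Any], Any]] = None,
    yield_raw_on_error: bool = False,
    raw: bool = False,
) -> Iterator[Any]:
    """Simplified stand-in for webscout's sanitize_stream; behavior is
    inferred from the call sites in this diff, not copied from the library."""
    for chunk in data:
        line = chunk.decode("utf-8", errors="ignore").strip()
        # Strip the SSE prefix ("data:") when one is configured.
        if intro_value and line.startswith(intro_value):
            line = line[len(intro_value):].strip()
        if not line or line in skip_markers:
            continue
        if raw:
            # Inferred raw semantics: pass the cleaned chunk through
            # untouched, leaving parsing to the caller.
            yield line
            continue
        if to_json:
            try:
                payload = json.loads(line)
            except json.JSONDecodeError:
                if yield_raw_on_error:
                    yield line
                continue
        else:
            payload = line
        yield content_extractor(payload) if content_extractor else payload
```

The `content_extractor` hooks seen in this release fit that shape: TogetherAI's non-stream path digs `choices[0].message.content` out of the parsed JSON, while the streaming paths pull the per-chunk `delta`.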
webscout/Provider/Venice.py
CHANGED

@@ -20,7 +20,6 @@ class Venice(Provider):
     AVAILABLE_MODELS = [
         "mistral-31-24b",
         "dolphin-3.0-mistral-24b",
-        "llama-3.2-3b-akash",
         "qwen2dot5-coder-32b",
         "deepseek-coder-v2-lite",
 
@@ -128,83 +127,69 @@ class Venice(Provider):
             )
         else:
             raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
-        # Update Payload construction based on successful request
         payload = {
-            "requestId": str(uuid4())[:7],
+            "requestId": str(uuid4())[:7],
             "modelId": self.model,
             "prompt": [{"content": conversation_prompt, "role": "user"}],
-            "systemPrompt": self.system_prompt,
+            "systemPrompt": self.system_prompt,
             "conversationType": "text",
-            "temperature": self.temperature,
-            "webEnabled": True,
-            "topP": self.top_p,
-            "includeVeniceSystemPrompt": True,
-            "isCharacter": False,
-
-            "
-            "isDefault": True,
+            "temperature": self.temperature,
+            "webEnabled": True,
+            "topP": self.top_p,
+            "includeVeniceSystemPrompt": True,
+            "isCharacter": False,
+            "userId": "user_anon_" + str(random.randint(1000000000, 9999999999)),
+            "isDefault": True,
             "textToSpeech": {"voiceId": "af_sky", "speed": 1},
-            "clientProcessingTime": random.randint(10, 50)
+            "clientProcessingTime": random.randint(10, 50)
         }
-
         def for_stream():
             try:
-                # Use curl_cffi session post
                 response = self.session.post(
                     self.api_endpoint,
                     json=payload,
                     stream=True,
                     timeout=self.timeout,
-                    impersonate="edge101"
+                    impersonate="edge101"
                 )
-                # Check response status after the call
                 if response.status_code != 200:
-                    # Include response text in error
                     raise exceptions.FailedToGenerateResponseError(
                         f"Request failed with status code {response.status_code} - {response.text}"
                     )
-
                 streaming_text = ""
-                # Use sanitize_stream with the custom extractor
                 processed_stream = sanitize_stream(
-                    data=response.iter_content(chunk_size=None),
-                    intro_value=None,
-                    to_json=True,
-                    content_extractor=self._venice_extractor,
-                    yield_raw_on_error=False
+                    data=response.iter_content(chunk_size=None),
+                    intro_value=None,
+                    to_json=True,
+                    content_extractor=self._venice_extractor,
+                    yield_raw_on_error=False,
+                    raw=raw
                 )
-
                 for content_chunk in processed_stream:
-                    #
-                    if
-
-
-
-
+                    # Always yield as string, even in raw mode
+                    if isinstance(content_chunk, bytes):
+                        content_chunk = content_chunk.decode('utf-8', errors='ignore')
+                    if raw:
+                        yield content_chunk
+                    else:
+                        if content_chunk and isinstance(content_chunk, str):
+                            streaming_text += content_chunk
+                            yield dict(text=content_chunk)
                 self.conversation.update_chat_history(prompt, streaming_text)
                 self.last_response = {"text": streaming_text}
-
             except CurlError as e:
                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
-                # Catch requests.exceptions.RequestException if needed, but CurlError is primary for curl_cffi
             except Exception as e:
                 raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
-
         def for_non_stream():
             full_text = ""
-            # Iterate through the generator provided by for_stream
             for chunk_data in for_stream():
-                # Check if chunk_data is a dict (not raw) and has 'text'
                 if isinstance(chunk_data, dict) and "text" in chunk_data:
                     full_text += chunk_data["text"]
-                # If raw=True, chunk_data is the string content itself
                 elif isinstance(chunk_data, str):
                     full_text += chunk_data
-            # Update last_response after aggregation
            self.last_response = {"text": full_text}
            return self.last_response
-
        return for_stream() if stream else for_non_stream()
 
    def chat(
@@ -213,14 +198,20 @@ class Venice(Provider):
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
+        raw: bool = False,  # Added raw parameter
    ) -> Union[str, Generator]:
        def for_stream():
-            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
-
+            for response in self.ask(prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally):
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
        def for_non_stream():
-
-
-
+            result = self.ask(prompt, False, raw=raw, optimizer=optimizer, conversationally=conversationally)
+            if raw:
+                return result
+            else:
+                return self.get_message(result)
        return for_stream() if stream else for_non_stream()
 
    def get_message(self, response: dict) -> str:
|