webscout 8.3.1__py3-none-any.whl → 8.3.3__py3-none-any.whl

This diff reflects the published contents of the two package versions as they appear in their public registry, and is provided for informational purposes only.

Potentially problematic release: this version of webscout has been flagged as possibly problematic.

Files changed (114)
  1. webscout/AIutel.py +180 -78
  2. webscout/Bing_search.py +417 -0
  3. webscout/Extra/gguf.py +706 -177
  4. webscout/Provider/AISEARCH/__init__.py +1 -0
  5. webscout/Provider/AISEARCH/genspark_search.py +7 -7
  6. webscout/Provider/AISEARCH/stellar_search.py +132 -0
  7. webscout/Provider/ExaChat.py +84 -58
  8. webscout/Provider/GeminiProxy.py +140 -0
  9. webscout/Provider/HeckAI.py +85 -80
  10. webscout/Provider/Jadve.py +56 -50
  11. webscout/Provider/MCPCore.py +78 -75
  12. webscout/Provider/MiniMax.py +207 -0
  13. webscout/Provider/Nemotron.py +41 -13
  14. webscout/Provider/Netwrck.py +34 -51
  15. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -4
  16. webscout/Provider/OPENAI/GeminiProxy.py +328 -0
  17. webscout/Provider/OPENAI/MiniMax.py +298 -0
  18. webscout/Provider/OPENAI/README.md +32 -29
  19. webscout/Provider/OPENAI/README_AUTOPROXY.md +238 -0
  20. webscout/Provider/OPENAI/TogetherAI.py +4 -17
  21. webscout/Provider/OPENAI/__init__.py +17 -1
  22. webscout/Provider/OPENAI/autoproxy.py +1067 -39
  23. webscout/Provider/OPENAI/base.py +17 -76
  24. webscout/Provider/OPENAI/deepinfra.py +42 -108
  25. webscout/Provider/OPENAI/e2b.py +0 -1
  26. webscout/Provider/OPENAI/flowith.py +179 -166
  27. webscout/Provider/OPENAI/friendli.py +233 -0
  28. webscout/Provider/OPENAI/mcpcore.py +109 -70
  29. webscout/Provider/OPENAI/monochat.py +329 -0
  30. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  31. webscout/Provider/OPENAI/scirachat.py +59 -51
  32. webscout/Provider/OPENAI/toolbaz.py +3 -9
  33. webscout/Provider/OPENAI/typegpt.py +1 -1
  34. webscout/Provider/OPENAI/utils.py +19 -42
  35. webscout/Provider/OPENAI/x0gpt.py +14 -2
  36. webscout/Provider/OPENAI/xenai.py +514 -0
  37. webscout/Provider/OPENAI/yep.py +8 -2
  38. webscout/Provider/OpenGPT.py +54 -32
  39. webscout/Provider/PI.py +58 -84
  40. webscout/Provider/StandardInput.py +32 -13
  41. webscout/Provider/TTI/README.md +9 -9
  42. webscout/Provider/TTI/__init__.py +3 -1
  43. webscout/Provider/TTI/aiarta.py +92 -78
  44. webscout/Provider/TTI/bing.py +231 -0
  45. webscout/Provider/TTI/infip.py +212 -0
  46. webscout/Provider/TTI/monochat.py +220 -0
  47. webscout/Provider/TTS/speechma.py +45 -39
  48. webscout/Provider/TeachAnything.py +11 -3
  49. webscout/Provider/TextPollinationsAI.py +78 -70
  50. webscout/Provider/TogetherAI.py +350 -0
  51. webscout/Provider/Venice.py +37 -46
  52. webscout/Provider/VercelAI.py +27 -24
  53. webscout/Provider/WiseCat.py +35 -35
  54. webscout/Provider/WrDoChat.py +22 -26
  55. webscout/Provider/WritingMate.py +26 -22
  56. webscout/Provider/XenAI.py +324 -0
  57. webscout/Provider/__init__.py +10 -5
  58. webscout/Provider/deepseek_assistant.py +378 -0
  59. webscout/Provider/granite.py +48 -57
  60. webscout/Provider/koala.py +51 -39
  61. webscout/Provider/learnfastai.py +49 -64
  62. webscout/Provider/llmchat.py +79 -93
  63. webscout/Provider/llmchatco.py +63 -78
  64. webscout/Provider/multichat.py +51 -40
  65. webscout/Provider/oivscode.py +1 -1
  66. webscout/Provider/scira_chat.py +159 -96
  67. webscout/Provider/scnet.py +13 -13
  68. webscout/Provider/searchchat.py +13 -13
  69. webscout/Provider/sonus.py +12 -11
  70. webscout/Provider/toolbaz.py +25 -8
  71. webscout/Provider/turboseek.py +41 -42
  72. webscout/Provider/typefully.py +27 -12
  73. webscout/Provider/typegpt.py +41 -46
  74. webscout/Provider/uncovr.py +55 -90
  75. webscout/Provider/x0gpt.py +33 -17
  76. webscout/Provider/yep.py +79 -96
  77. webscout/auth/__init__.py +55 -0
  78. webscout/auth/api_key_manager.py +189 -0
  79. webscout/auth/auth_system.py +100 -0
  80. webscout/auth/config.py +76 -0
  81. webscout/auth/database.py +400 -0
  82. webscout/auth/exceptions.py +67 -0
  83. webscout/auth/middleware.py +248 -0
  84. webscout/auth/models.py +130 -0
  85. webscout/auth/providers.py +279 -0
  86. webscout/auth/rate_limiter.py +254 -0
  87. webscout/auth/request_models.py +127 -0
  88. webscout/auth/request_processing.py +226 -0
  89. webscout/auth/routes.py +550 -0
  90. webscout/auth/schemas.py +103 -0
  91. webscout/auth/server.py +367 -0
  92. webscout/client.py +121 -70
  93. webscout/litagent/Readme.md +68 -55
  94. webscout/litagent/agent.py +99 -9
  95. webscout/scout/core/scout.py +104 -26
  96. webscout/scout/element.py +139 -18
  97. webscout/swiftcli/core/cli.py +14 -3
  98. webscout/swiftcli/decorators/output.py +59 -9
  99. webscout/update_checker.py +31 -49
  100. webscout/version.py +1 -1
  101. webscout/webscout_search.py +4 -12
  102. webscout/webscout_search_async.py +3 -10
  103. webscout/yep_search.py +2 -11
  104. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/METADATA +141 -99
  105. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/RECORD +109 -83
  106. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/entry_points.txt +1 -1
  107. webscout/Provider/HF_space/__init__.py +0 -0
  108. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  109. webscout/Provider/OPENAI/api.py +0 -1320
  110. webscout/Provider/TTI/fastflux.py +0 -233
  111. webscout/Provider/Writecream.py +0 -246
  112. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/WHEEL +0 -0
  113. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/licenses/LICENSE.md +0 -0
  114. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/top_level.txt +0 -0
webscout/Provider/TogetherAI.py (new file)
@@ -0,0 +1,350 @@
+ from curl_cffi.requests import Session
+ from curl_cffi import CurlError
+ from typing import Any, Dict, Optional, Generator, Union
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+
+ class TogetherAI(Provider):
+     """
+     A class to interact with the TogetherAI API.
+     """
+
+     AVAILABLE_MODELS = [
+         "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+         "Qwen/QwQ-32B",
+         "Qwen/Qwen2-72B-Instruct",
+         "Qwen/Qwen2-VL-72B-Instruct",
+         "Qwen/Qwen2.5-72B-Instruct-Turbo",
+         "Qwen/Qwen2.5-7B-Instruct-Turbo",
+         "Qwen/Qwen2.5-VL-72B-Instruct",
+         "Qwen/Qwen3-235B-A22B-fp8-tput",
+         "Salesforce/Llama-Rank-V1",
+         "arcee-ai/arcee-blitz",
+         "arcee-ai/caller",
+         "arcee-ai/coder-large",
+         "arcee-ai/maestro-reasoning",
+         "arcee-ai/virtuoso-large",
+         "arcee-ai/virtuoso-medium-v2",
+         "arcee_ai/arcee-spotlight",
+         "blackbox/meta-llama-3-1-8b",
+         "deepseek-ai/DeepSeek-R1",
+         "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
+         "deepseek-ai/DeepSeek-R1-Distill-Llama-70B-free",
+         "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+         "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
+         "deepseek-ai/DeepSeek-V3",
+         "google/gemma-2-27b-it",
+         "lgai/exaone-3-5-32b-instruct",
+         "lgai/exaone-deep-32b",
+         "marin-community/marin-8b-instruct",
+         "meta-llama-llama-2-70b-hf",
+         "meta-llama/Llama-2-70b-hf",
+         "meta-llama/Llama-3-8b-chat-hf",
+         "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
+         "meta-llama/Llama-3.2-3B-Instruct-Turbo",
+         "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
+         "meta-llama/Llama-3.3-70B-Instruct-Turbo",
+         "meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
+         "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+         "meta-llama/Llama-4-Scout-17B-16E-Instruct",
+         "meta-llama/Llama-Vision-Free",
+         "meta-llama/Meta-Llama-3-70B-Instruct-Turbo",
+         "meta-llama/Meta-Llama-3-8B-Instruct-Lite",
+         "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
+         "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
+         "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
+         "mistralai/Mistral-7B-Instruct-v0.1",
+         "mistralai/Mistral-7B-Instruct-v0.2",
+         "mistralai/Mistral-7B-Instruct-v0.3",
+         "mistralai/Mistral-Small-24B-Instruct-2501",
+         "mistralai/Mixtral-8x7B-Instruct-v0.1",
+         "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
+         "perplexity-ai/r1-1776",
+         "scb10x/scb10x-llama3-1-typhoon2-70b-instruct",
+         "scb10x/scb10x-typhoon-2-1-gemma3-12b",
+         "togethercomputer/Refuel-Llm-V2-Small",
+     ]
+
+     @staticmethod
+     def _togetherai_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+         """Extracts content from TogetherAI stream JSON objects."""
+         if isinstance(chunk, dict):
+             return chunk.get("choices", [{}])[0].get("delta", {}).get("content")
+         return None
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2049,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "meta-llama/Llama-3.1-8B-Instruct-Turbo",
+         system_prompt: str = "You are a helpful assistant.",
+         browser: str = "chrome"
+     ):
+         """Initializes the TogetherAI API client."""
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.api_endpoint = "https://api.together.xyz/v1/chat/completions"
+         self.activation_endpoint = "https://www.codegeneration.ai/activate-v2"
+
+         # Initialize LitAgent
+         self.agent = LitAgent()
+         self.fingerprint = self.agent.generate_fingerprint(browser)
+
+         # Use the fingerprint for headers
+         self.headers = {
+             "Accept": self.fingerprint["accept"],
+             "Accept-Language": self.fingerprint["accept_language"],
+             "Content-Type": "application/json",
+             "Cache-Control": "no-cache",
+             "Origin": "https://www.codegeneration.ai",
+             "Pragma": "no-cache",
+             "Referer": "https://www.codegeneration.ai/",
+             "Sec-Fetch-Dest": "empty",
+             "Sec-Fetch-Mode": "cors",
+             "Sec-Fetch-Site": "same-site",
+             "User-Agent": self.fingerprint["user_agent"],
+         }
+
+         # Initialize curl_cffi Session
+         self.session = Session()
+         self.session.headers.update(self.headers)
+         self.session.proxies = proxies
+         self.system_prompt = system_prompt
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self._api_key_cache = None
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def refresh_identity(self, browser: str = None):
+         """
+         Refreshes the browser identity fingerprint.
+
+         Args:
+             browser: Specific browser to use for the new fingerprint
+         """
+         browser = browser or self.fingerprint.get("browser_type", "chrome")
+         self.fingerprint = self.agent.generate_fingerprint(browser)
+
+         # Update headers with new fingerprint
+         self.headers.update({
+             "Accept": self.fingerprint["accept"],
+             "Accept-Language": self.fingerprint["accept_language"],
+             "User-Agent": self.fingerprint["user_agent"],
+         })
+
+         # Update session headers
+         self.session.headers.update(self.headers)
+
+         return self.fingerprint
+
+     def get_activation_key(self) -> str:
+         """Get API key from activation endpoint"""
+         if self._api_key_cache:
+             return self._api_key_cache
+
+         try:
+             response = self.session.get(
+                 self.activation_endpoint,
+                 headers={"Accept": "application/json"},
+                 timeout=30
+             )
+             response.raise_for_status()
+             activation_data = response.json()
+             self._api_key_cache = activation_data["openAIParams"]["apiKey"]
+             return self._api_key_cache
+         except Exception as e:
+             raise exceptions.FailedToGenerateResponseError(f"Failed to get activation key: {e}")
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[Dict[str, Any], Generator]:
+         """
+         Sends a prompt to the TogetherAI API and returns the response.
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+         if not self.headers.get("Authorization"):
+             api_key = self.get_activation_key()
+             self.headers["Authorization"] = f"Bearer {api_key}"
+             self.session.headers.update(self.headers)
+         payload = {
+             "model": self.model,
+             "messages": [
+                 {"role": "system", "content": self.system_prompt},
+                 {"role": "user", "content": conversation_prompt},
+             ],
+             "stream": stream
+         }
+         def for_stream():
+             streaming_text = ""
+             try:
+                 response = self.session.post(
+                     self.api_endpoint,
+                     json=payload,
+                     stream=True,
+                     timeout=self.timeout,
+                     impersonate="chrome110"
+                 )
+                 response.raise_for_status()
+                 processed_stream = sanitize_stream(
+                     data=response.iter_content(chunk_size=None),
+                     intro_value="data:",
+                     to_json=True,
+                     skip_markers=["[DONE]"],
+                     content_extractor=self._togetherai_extractor,
+                     yield_raw_on_error=False,
+                     raw=raw
+                 )
+                 for content_chunk in processed_stream:
+                     if isinstance(content_chunk, bytes):
+                         content_chunk = content_chunk.decode('utf-8', errors='ignore')
+                     if content_chunk is None:
+                         continue
+                     if raw:
+                         yield content_chunk
+                     else:
+                         if content_chunk and isinstance(content_chunk, str):
+                             streaming_text += content_chunk
+                             resp = dict(text=content_chunk)
+                             yield resp
+             except CurlError as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)}") from e
+             finally:
+                 if streaming_text:
+                     self.last_response = {"text": streaming_text}
+                     self.conversation.update_chat_history(prompt, streaming_text)
+         def for_non_stream():
+             try:
+                 response = self.session.post(
+                     self.api_endpoint,
+                     json=payload,
+                     timeout=self.timeout,
+                     impersonate="chrome110"
+                 )
+                 response.raise_for_status()
+                 response_text = response.text
+                 processed_stream = sanitize_stream(
+                     data=response_text,
+                     to_json=True,
+                     intro_value=None,
+                     content_extractor=lambda chunk: chunk.get("choices", [{}])[0].get("message", {}).get("content") if isinstance(chunk, dict) else None,
+                     yield_raw_on_error=False,
+                     raw=raw
+                 )
+                 content = next((c for c in processed_stream if c is not None), None)
+                 content = content if isinstance(content, str) else ""
+                 self.last_response = {"text": content}
+                 self.conversation.update_chat_history(prompt, content)
+                 return self.last_response if not raw else content
+             except CurlError as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+             except Exception as e:
+                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e} - {err_text}") from e
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         raw: bool = False,  # Added raw parameter
+     ) -> Union[str, Generator[str, None, None]]:
+         def for_stream_chat():
+             gen = self.ask(
+                 prompt, stream=True, raw=raw,
+                 optimizer=optimizer, conversationally=conversationally
+             )
+             for response in gen:
+                 if raw:
+                     yield response
+                 else:
+                     yield self.get_message(response)
+         def for_non_stream_chat():
+             response_data = self.ask(
+                 prompt, stream=False, raw=raw,
+                 optimizer=optimizer, conversationally=conversationally
+             )
+             if raw:
+                 return response_data
+             else:
+                 return self.get_message(response_data)
+         return for_stream_chat() if stream else for_non_stream_chat()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response"""
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+
+ if __name__ == "__main__":
+     print("-" * 80)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     for model in TogetherAI.AVAILABLE_MODELS:
+         try:
+             test_ai = TogetherAI(model=model, timeout=60)
+             response = test_ai.chat("Say 'Hello' in one word", stream=True)
+             response_text = ""
+             for chunk in response:
+                 response_text += chunk
+
+             if response_text and len(response_text.strip()) > 0:
+                 status = "✓"
+                 # Clean and truncate response
+                 clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
+                 display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"\r{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             print(f"\r{model:<50} {'✗':<10} {str(e)}")
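
For context, the new provider follows webscout's usual Provider contract: chat() returns the full reply as a string, or a generator of text chunks when stream=True, and the Together API key is fetched lazily from the activation endpoint on the first request. Note that the __init__ default "meta-llama/Llama-3.1-8B-Instruct-Turbo" does not appear in AVAILABLE_MODELS, so constructing TogetherAI() with no arguments trips the validation ValueError; a model from the list must be passed explicitly. A minimal usage sketch based on the class above (network access and a valid activation response are assumed):

    # Minimal usage sketch for the new TogetherAI provider shown above.
    # Assumes webscout 8.3.3 is installed; the model name is taken from AVAILABLE_MODELS.
    from webscout.Provider.TogetherAI import TogetherAI

    ai = TogetherAI(
        model="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
        timeout=60,
    )

    # Non-streaming: chat() returns the whole reply as a string.
    print(ai.chat("What is the capital of France?"))

    # Streaming: chat(stream=True) yields text chunks as they arrive.
    for chunk in ai.chat("Write a haiku about the sea.", stream=True):
        print(chunk, end="", flush=True)
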
webscout/Provider/Venice.py
@@ -20,7 +20,6 @@ class Venice(Provider):
      AVAILABLE_MODELS = [
          "mistral-31-24b",
          "dolphin-3.0-mistral-24b",
-         "llama-3.2-3b-akash",
          "qwen2dot5-coder-32b",
          "deepseek-coder-v2-lite",
 
@@ -128,83 +127,69 @@ class Venice(Provider):
                  )
          else:
              raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
-         # Update Payload construction based on successful request
          payload = {
-             "requestId": str(uuid4())[:7],  # Keep generating request ID
+             "requestId": str(uuid4())[:7],
              "modelId": self.model,
              "prompt": [{"content": conversation_prompt, "role": "user"}],
-             "systemPrompt": self.system_prompt,  # Use configured system prompt
+             "systemPrompt": self.system_prompt,
              "conversationType": "text",
-             "temperature": self.temperature,  # Use configured temperature
-             "webEnabled": True,  # Keep webEnabled
-             "topP": self.top_p,  # Use configured topP
-             "includeVeniceSystemPrompt": True,  # Set to True as per example
-             "isCharacter": False,  # Keep as False
-             # Add missing fields from example payload
-             "userId": "user_anon_" + str(random.randint(1000000000, 9999999999)),  # Generate anon user ID
-             "isDefault": True,
+             "temperature": self.temperature,
+             "webEnabled": True,
+             "topP": self.top_p,
+             "includeVeniceSystemPrompt": True,
+             "isCharacter": False,
+             "userId": "user_anon_" + str(random.randint(1000000000, 9999999999)),
+             "isDefault": True,
              "textToSpeech": {"voiceId": "af_sky", "speed": 1},
-             "clientProcessingTime": random.randint(10, 50)  # Randomize slightly
+             "clientProcessingTime": random.randint(10, 50)
          }
-
          def for_stream():
              try:
-                 # Use curl_cffi session post
                  response = self.session.post(
                      self.api_endpoint,
                      json=payload,
                      stream=True,
                      timeout=self.timeout,
-                     impersonate="edge101"  # Match impersonation closer to headers
+                     impersonate="edge101"
                  )
-                 # Check response status after the call
                  if response.status_code != 200:
-                     # Include response text in error
                      raise exceptions.FailedToGenerateResponseError(
                          f"Request failed with status code {response.status_code} - {response.text}"
                      )
-
                  streaming_text = ""
-                 # Use sanitize_stream with the custom extractor
                  processed_stream = sanitize_stream(
-                     data=response.iter_content(chunk_size=None),  # Pass byte iterator
-                     intro_value=None,  # No simple prefix
-                     to_json=True,  # Each line is JSON
-                     content_extractor=self._venice_extractor,  # Use the specific extractor
-                     yield_raw_on_error=False  # Skip non-JSON lines or lines where extractor fails
+                     data=response.iter_content(chunk_size=None),
+                     intro_value=None,
+                     to_json=True,
+                     content_extractor=self._venice_extractor,
+                     yield_raw_on_error=False,
+                     raw=raw
                  )
-
                  for content_chunk in processed_stream:
-                     # content_chunk is the string extracted by _venice_extractor
-                     if content_chunk and isinstance(content_chunk, str):
-                         streaming_text += content_chunk
-                         yield content_chunk if raw else dict(text=content_chunk)
-
-                 # Update history and last response after stream finishes
+                     # Always yield as string, even in raw mode
+                     if isinstance(content_chunk, bytes):
+                         content_chunk = content_chunk.decode('utf-8', errors='ignore')
+                     if raw:
+                         yield content_chunk
+                     else:
+                         if content_chunk and isinstance(content_chunk, str):
+                             streaming_text += content_chunk
+                             yield dict(text=content_chunk)
                  self.conversation.update_chat_history(prompt, streaming_text)
                  self.last_response = {"text": streaming_text}
-
              except CurlError as e:
                  raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
-                 # Catch requests.exceptions.RequestException if needed, but CurlError is primary for curl_cffi
             except Exception as e:
                  raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
-
          def for_non_stream():
              full_text = ""
-             # Iterate through the generator provided by for_stream
              for chunk_data in for_stream():
-                 # Check if chunk_data is a dict (not raw) and has 'text'
                  if isinstance(chunk_data, dict) and "text" in chunk_data:
                      full_text += chunk_data["text"]
-                 # If raw=True, chunk_data is the string content itself
                  elif isinstance(chunk_data, str):
                      full_text += chunk_data
-             # Update last_response after aggregation
              self.last_response = {"text": full_text}
              return self.last_response
-
          return for_stream() if stream else for_non_stream()
 
      def chat(
@@ -213,14 +198,20 @@ class Venice(Provider):
          stream: bool = False,
          optimizer: str = None,
          conversationally: bool = False,
+         raw: bool = False,  # Added raw parameter
      ) -> Union[str, Generator]:
          def for_stream():
-             for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
-                 yield self.get_message(response)
+             for response in self.ask(prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally):
+                 if raw:
+                     yield response
+                 else:
+                     yield self.get_message(response)
          def for_non_stream():
-             return self.get_message(
-                 self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
-             )
+             result = self.ask(prompt, False, raw=raw, optimizer=optimizer, conversationally=conversationally)
+             if raw:
+                 return result
+             else:
+                 return self.get_message(result)
          return for_stream() if stream else for_non_stream()
 
      def get_message(self, response: dict) -> str:
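
Taken together, the Venice changes give ask() and chat() a uniform raw contract: raw=True passes plain string chunks straight through from sanitize_stream, while the default path still yields {"text": ...} dicts that get_message() unwraps. A short illustrative sketch, assuming a default-constructed Venice instance works in your environment:

    # Illustrative only: the raw flag added to Venice.chat() above.
    from webscout.Provider.Venice import Venice

    ai = Venice()

    # Default mode: chat() unwraps each {"text": ...} dict via get_message().
    for piece in ai.chat("Hello there", stream=True):
        print(piece, end="", flush=True)

    # raw=True: chunks are passed through as plain strings, bypassing
    # get_message() and its dict-type assertion.
    for piece in ai.chat("Hello there", stream=True, raw=True):
        print(piece, end="", flush=True)
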
webscout/Provider/VercelAI.py
@@ -141,7 +141,6 @@ class VercelAI(Provider):
                  raise Exception(
                      f"Optimizer is not one of {self.__available_optimizers}"
                  )
-
          payload = {
              "id": "guest",
              "messages": [
@@ -155,7 +154,6 @@ class VercelAI(Provider):
              ],
              "selectedChatModelId": self.model
          }
-
          def for_stream():
              response = self.session.post(
                  self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout
@@ -163,31 +161,32 @@ class VercelAI(Provider):
              if not response.ok:
                  error_msg = f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                  raise exceptions.FailedToGenerateResponseError(error_msg)
-
              streaming_text = ""
-             # Use sanitize_stream with the custom extractor
              processed_stream = sanitize_stream(
                  data=response.iter_content(chunk_size=None),  # Pass byte iterator
                  intro_value=None,  # No simple prefix
                  to_json=False,  # Content is not JSON
-                 content_extractor=self._vercelai_extractor  # Use the specific extractor
+                 content_extractor=self._vercelai_extractor,  # Use the specific extractor
+                 raw=raw
              )
-
              for content_chunk in processed_stream:
-                 if content_chunk and isinstance(content_chunk, str):
-                     streaming_text += content_chunk
-                     yield content_chunk if raw else dict(text=content_chunk)
-
+                 # Always yield as string, even in raw mode
+                 if isinstance(content_chunk, bytes):
+                     content_chunk = content_chunk.decode('utf-8', errors='ignore')
+                 if raw:
+                     yield content_chunk
+                 else:
+                     if content_chunk and isinstance(content_chunk, str):
+                         streaming_text += content_chunk
+                         yield dict(text=content_chunk)
              self.last_response.update(dict(text=streaming_text))
              self.conversation.update_chat_history(
                  prompt, self.get_message(self.last_response)
              )
-
          def for_non_stream():
              for _ in for_stream():
                  pass
              return self.last_response
-
          return for_stream() if stream else for_non_stream()
 
      def chat(
@@ -196,24 +195,28 @@ class VercelAI(Provider):
          stream: bool = False,
          optimizer: str = None,
          conversationally: bool = False,
+         raw: bool = False,  # Added raw parameter
      ) -> str:
-         """Generate response `str`"""
          def for_stream():
              for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
+                 prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally
              ):
-                 yield self.get_message(response)
-
+                 if raw:
+                     yield response
+                 else:
+                     yield self.get_message(response)
          def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
+             result = self.ask(
+                 prompt,
+                 False,
+                 raw=raw,
+                 optimizer=optimizer,
+                 conversationally=conversationally,
              )
-
+             if raw:
+                 return result
+             else:
+                 return self.get_message(result)
          return for_stream() if stream else for_non_stream()
 
      def get_message(self, response: dict) -> str:
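
VercelAI adopts the same raw plumbing as Venice, so callers can treat every updated provider uniformly: streams yield either plain strings (raw=True) or {"text": ...} dicts (default). A hedged, provider-agnostic sketch of a consumer that normalizes both shapes (iter_text is our illustrative helper, not part of webscout):

    from typing import Any, Iterable

    def iter_text(chunks: Iterable[Any]) -> Iterable[str]:
        """Yield plain text whether chunks arrive as raw strings/bytes or {"text": ...} dicts."""
        for chunk in chunks:
            if isinstance(chunk, bytes):
                # raw mode may surface undecoded bytes
                yield chunk.decode("utf-8", errors="ignore")
            elif isinstance(chunk, dict):
                # default mode yields dict(text=...)
                yield chunk.get("text", "")
            elif isinstance(chunk, str):
                yield chunk

    # Usage: for piece in iter_text(ai.chat("Hi", stream=True, raw=True)): print(piece, end="")
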