webscout-8.3.5-py3-none-any.whl → webscout-8.3.6-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only.

Note: this release of webscout has been flagged as potentially problematic.

Files changed (63)
  1. webscout/Bard.py +12 -6
  2. webscout/DWEBS.py +66 -57
  3. webscout/Provider/{UNFINISHED → AISEARCH}/PERPLEXED_search.py +34 -74
  4. webscout/Provider/AISEARCH/__init__.py +1 -1
  5. webscout/Provider/Deepinfra.py +6 -0
  6. webscout/Provider/Flowith.py +6 -1
  7. webscout/Provider/GithubChat.py +1 -0
  8. webscout/Provider/GptOss.py +207 -0
  9. webscout/Provider/Kimi.py +445 -0
  10. webscout/Provider/Netwrck.py +3 -6
  11. webscout/Provider/OPENAI/README.md +2 -1
  12. webscout/Provider/OPENAI/TogetherAI.py +50 -55
  13. webscout/Provider/OPENAI/__init__.py +4 -2
  14. webscout/Provider/OPENAI/copilot.py +20 -4
  15. webscout/Provider/OPENAI/deepinfra.py +6 -0
  16. webscout/Provider/OPENAI/e2b.py +60 -8
  17. webscout/Provider/OPENAI/flowith.py +4 -3
  18. webscout/Provider/OPENAI/generate_api_key.py +48 -0
  19. webscout/Provider/OPENAI/gptoss.py +288 -0
  20. webscout/Provider/OPENAI/kimi.py +469 -0
  21. webscout/Provider/OPENAI/netwrck.py +8 -12
  22. webscout/Provider/OPENAI/refact.py +274 -0
  23. webscout/Provider/OPENAI/textpollinations.py +3 -6
  24. webscout/Provider/OPENAI/toolbaz.py +1 -0
  25. webscout/Provider/TTI/bing.py +14 -2
  26. webscout/Provider/TTI/together.py +10 -9
  27. webscout/Provider/TTS/README.md +0 -1
  28. webscout/Provider/TTS/__init__.py +0 -1
  29. webscout/Provider/TTS/base.py +479 -159
  30. webscout/Provider/TTS/deepgram.py +409 -156
  31. webscout/Provider/TTS/elevenlabs.py +425 -111
  32. webscout/Provider/TTS/freetts.py +317 -140
  33. webscout/Provider/TTS/gesserit.py +192 -128
  34. webscout/Provider/TTS/murfai.py +248 -113
  35. webscout/Provider/TTS/openai_fm.py +347 -129
  36. webscout/Provider/TTS/speechma.py +620 -586
  37. webscout/Provider/TextPollinationsAI.py +3 -6
  38. webscout/Provider/TogetherAI.py +50 -55
  39. webscout/Provider/UNFINISHED/VercelAIGateway.py +339 -0
  40. webscout/Provider/__init__.py +2 -90
  41. webscout/Provider/cerebras.py +83 -33
  42. webscout/Provider/copilot.py +42 -23
  43. webscout/Provider/toolbaz.py +1 -0
  44. webscout/conversation.py +22 -20
  45. webscout/sanitize.py +14 -10
  46. webscout/scout/README.md +20 -23
  47. webscout/scout/core/crawler.py +125 -38
  48. webscout/scout/core/scout.py +26 -5
  49. webscout/version.py +1 -1
  50. webscout/webscout_search.py +13 -6
  51. webscout/webscout_search_async.py +10 -8
  52. webscout/yep_search.py +13 -5
  53. {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/METADATA +2 -1
  54. {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/RECORD +59 -56
  55. webscout/Provider/Glider.py +0 -225
  56. webscout/Provider/OPENAI/c4ai.py +0 -394
  57. webscout/Provider/OPENAI/glider.py +0 -330
  58. webscout/Provider/TTS/sthir.py +0 -94
  59. webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +0 -0
  60. {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/WHEEL +0 -0
  61. {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/entry_points.txt +0 -0
  62. {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/licenses/LICENSE.md +0 -0
  63. {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/top_level.txt +0 -0
--- a/webscout/Provider/TextPollinationsAI.py
+++ b/webscout/Provider/TextPollinationsAI.py
@@ -15,26 +15,23 @@ class TextPollinationsAI(Provider):
     """
 
     AVAILABLE_MODELS = [
-        "deepseek",
         "deepseek-reasoning",
-        "gemma-roblox",
-        "grok",
+        "glm",
+        "gpt-5-nano",
         "llama-fast-roblox",
         "llama-roblox",
         "llamascout",
         "mistral",
         "mistral-nemo-roblox",
         "mistral-roblox",
+        "nova-fast",
         "openai",
         "openai-audio",
         "openai-fast",
         "openai-large",
-        "openai-reasoning",
         "openai-roblox",
-        "phi",
         "qwen-coder",
         "bidara",
-        "elixposearch",
         "evil",
         "hypnosis-tracy",
         "midijourney",
--- a/webscout/Provider/TogetherAI.py
+++ b/webscout/Provider/TogetherAI.py
@@ -15,72 +15,67 @@ class TogetherAI(Provider):
     """
 
     AVAILABLE_MODELS = [
-        "mistralai/Mistral-7B-Instruct-v0.3",
-        "togethercomputer/MoA-1",
-        "Qwen/Qwen2.5-7B-Instruct-Turbo",
-        "meta-llama/Llama-3-8b-chat-hf",
-        "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
-        "togethercomputer/MoA-1-Turbo",
-        "eddiehou/meta-llama/Llama-3.1-405B",
-        "mistralai/Mistral-7B-Instruct-v0.2",
-        "meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
-        "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
-        "meta-llama/Meta-Llama-3-70B-Instruct-Turbo",
-        "meta-llama/Llama-3.3-70B-Instruct-Turbo",
-        "Qwen/Qwen2.5-VL-72B-Instruct",
-        "arcee-ai/AFM-4.5B-Preview",
-        "lgai/exaone-3-5-32b-instruct",
-        "meta-llama/Llama-3-70b-chat-hf",
-        "mistralai/Mixtral-8x7B-Instruct-v0.1",
-        "google/gemma-2-27b-it",
+        "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+        "Qwen/QwQ-32B",
         "Qwen/Qwen2-72B-Instruct",
-        "mistralai/Mistral-Small-24B-Instruct-2501",
         "Qwen/Qwen2-VL-72B-Instruct",
-        "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
-        "meta-llama/Llama-Vision-Free",
-        "perplexity-ai/r1-1776",
-        "scb10x/scb10x-llama3-1-typhoon2-70b-instruct",
-        "arcee-ai/maestro-reasoning",
-        "togethercomputer/Refuel-Llm-V2-Small",
+        "Qwen/Qwen2.5-72B-Instruct-Turbo",
+        "Qwen/Qwen2.5-7B-Instruct-Turbo",
         "Qwen/Qwen2.5-Coder-32B-Instruct",
+        "Qwen/Qwen2.5-VL-72B-Instruct",
+        "Qwen/Qwen3-235B-A22B-Instruct-2507-tput",
+        "Qwen/Qwen3-235B-A22B-Thinking-2507",
+        "Qwen/Qwen3-235B-A22B-fp8-tput",
+        "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8",
+        "Salesforce/Llama-Rank-V1",
+        "Virtue-AI/VirtueGuard-Text-Lite",
+        "arcee-ai/AFM-4.5B",
         "arcee-ai/coder-large",
-        "Qwen/QwQ-32B",
+        "arcee-ai/maestro-reasoning",
+        "arcee-ai/virtuoso-large",
         "arcee_ai/arcee-spotlight",
+        "blackbox/meta-llama-3-1-8b",
+        "deepcogito/cogito-v2-preview-deepseek-671b",
+        "deepseek-ai/DeepSeek-R1",
         "deepseek-ai/DeepSeek-R1-0528-tput",
-        "marin-community/marin-8b-instruct",
-        "lgai/exaone-deep-32b",
-        "google/gemma-3-27b-it",
         "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
-        "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
-        "mistralai/Mistral-7B-Instruct-v0.1",
-        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
-        "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
         "deepseek-ai/DeepSeek-R1-Distill-Llama-70B-free",
-        "scb10x/scb10x-typhoon-2-1-gemma3-12b",
-        "togethercomputer/Refuel-Llm-V2",
-        "Qwen/Qwen2.5-72B-Instruct-Turbo",
-        "meta-llama/Meta-Llama-3-8B-Instruct-Lite",
-        "meta-llama/Llama-4-Scout-17B-16E-Instruct",
-        "meta-llama/Llama-3.2-3B-Instruct-Turbo",
-        "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
+        "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+        "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
         "deepseek-ai/DeepSeek-V3",
-        "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8",
-        "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
-        "Qwen/Qwen3-32B-FP8",
-        "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
-        "arcee-ai/virtuoso-large",
+        "google/gemma-2-27b-it",
         "google/gemma-3n-E4B-it",
-        "moonshotai/Kimi-K2-Instruct",
+        "lgai/exaone-3-5-32b-instruct",
+        "lgai/exaone-deep-32b",
+        "marin-community/marin-8b-instruct",
+        "meta-llama/Llama-2-70b-hf",
+        "meta-llama/Llama-3-70b-chat-hf",
+        "meta-llama/Llama-3-8b-chat-hf",
+        "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
+        "meta-llama/Llama-3.2-3B-Instruct-Turbo",
+        "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
+        "meta-llama/Llama-3.3-70B-Instruct-Turbo",
+        "meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
+        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+        "meta-llama/Llama-4-Scout-17B-16E-Instruct",
+        "meta-llama/Llama-Vision-Free",
+        "meta-llama/Meta-Llama-3-70B-Instruct-Turbo",
+        "meta-llama/Meta-Llama-3-8B-Instruct-Lite",
+        "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
+        "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
         "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
-        "deepseek-ai/DeepSeek-R1",
-        "Qwen/Qwen3-235B-A22B-fp8-tput",
-        "Qwen/Qwen3-235B-A22B-Instruct-2507-tput",
-        "Rrrr/nim/nvidia/llama-3.3-nemotron-super-49b-v1-de6a6453",
-        "Rrrr/mistralai/Devstral-Small-2505-306f5881",
-        "Qwen/Qwen3-235B-A22B-Thinking-2507",
-        "Rrrr/ChatGPT-5",
-        "Rrrr/MeowGPT-3.5",
-        "blackbox/meta-llama-3-1-8b"
+        "mistralai/Mistral-7B-Instruct-v0.1",
+        "mistralai/Mistral-7B-Instruct-v0.2",
+        "mistralai/Mistral-7B-Instruct-v0.3",
+        "mistralai/Mistral-Small-24B-Instruct-2501",
+        "mistralai/Mixtral-8x7B-Instruct-v0.1",
+        "moonshotai/Kimi-K2-Instruct",
+        "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
+        "perplexity-ai/r1-1776",
+        "scb10x/scb10x-llama3-1-typhoon2-70b-instruct",
+        "scb10x/scb10x-typhoon-2-1-gemma3-12b",
+        "togethercomputer/Refuel-Llm-V2-Small",
+        "zai-org/GLM-4.5-Air-FP8"
     ]
 
     @staticmethod
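
Most of this hunk is a re-sort rather than a behavior change: the surviving ids are re-listed in ASCII order, which keeps future diffs of this list small. The substantive changes are the removals (the Rrrr/*, togethercomputer/MoA-*, and eddiehou/* entries, among others) and additions such as "zai-org/GLM-4.5-Air-FP8" and "deepcogito/cogito-v2-preview-deepseek-671b". A quick sanity check, assuming the class attribute is readable without instantiating the provider:

    from webscout.Provider import TogetherAI

    # The 8.3.6 list is ASCII-sorted:
    assert TogetherAI.AVAILABLE_MODELS == sorted(TogetherAI.AVAILABLE_MODELS)
    assert "zai-org/GLM-4.5-Air-FP8" in TogetherAI.AVAILABLE_MODELS      # added
    assert "Rrrr/ChatGPT-5" not in TogetherAI.AVAILABLE_MODELS           # removed
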
--- /dev/null
+++ b/webscout/Provider/UNFINISHED/VercelAIGateway.py
@@ -0,0 +1,339 @@
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
+import json
+import random
+import string
+from typing import Any, Dict, Optional, Generator, Union, List
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+from webscout.litagent import LitAgent
+# Using LitProxy for intelligent proxy management
+try:
+    from litproxy import (
+        get_auto_proxy, get_proxy_dict, test_proxy, get_working_proxy,
+        refresh_proxy_cache, get_proxy_stats, set_proxy_cache_duration,
+        patch, use_proxy, proxyify, list_proxies, test_all_proxies,
+        current_proxy, make_request_with_auto_retry, create_auto_retry_session
+    )
+    LITPROXY_AVAILABLE = True
+except ImportError:
+    LITPROXY_AVAILABLE = False
+
+import requests
+
+class VercelAIGateway(Provider):
+    """
+    A class to interact with the Vercel AI SDK Gateway Demo API with intelligent proxy management using LitProxy.
+
+    Install LitProxy for advanced proxy features:
+        pip install litproxy
+
+    Features:
+    - Intelligent proxy rotation and health monitoring
+    - Automatic retry with proxy fallback on failures
+    - Support for multiple proxy sources (Webshare, NordVPN, Remote lists)
+    - Seamless curl_cffi session integration
+    - Comprehensive proxy diagnostics and statistics
+    """
+
+    AVAILABLE_MODELS = [
+        "amazon/nova-lite",
+        "amazon/nova-micro",
+        "anthropic/claude-3.5-haiku",
+        "google/gemini-2.0-flash",
+        "meta/llama-3.1-8b",
+        "mistral/ministral-3b",
+        "openai/gpt-3.5-turbo",
+        "openai/gpt-4o-mini",
+        "xai/grok-3"
+    ]
+
+    @staticmethod
+    def _vercel_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from Vercel AI Gateway stream JSON objects."""
+        if isinstance(chunk, dict):
+            if chunk.get("type") == "text-delta":
+                return chunk.get("delta")
+        return None
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "openai/gpt-4o-mini",
+        system_prompt: str = "You are a helpful assistant.",
+        browser: str = "chrome",
+        use_proxy: bool = True,
+        max_proxy_attempts: int = 3,
+        proxy_cache_duration: int = 300
+    ):
+        """
+        Initializes the Vercel AI Gateway API client with LitProxy integration.
+
+        Args:
+            use_proxy (bool): Enable proxy usage via LitProxy (default: True)
+            max_proxy_attempts (int): Maximum proxy retry attempts (default: 3)
+            proxy_cache_duration (int): Proxy cache duration in seconds (default: 300)
+        """
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.url = "https://ai-sdk-gateway-demo.labs.vercel.dev/api/chat"
+
+        # Initialize LitAgent
+        self.agent = LitAgent()
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        self.headers = {
+            "Accept": "*/*",
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Content-Type": "application/json",
+            "DNT": "1",
+            "Origin": "https://ai-sdk-gateway-demo.labs.vercel.dev",
+            "Priority": "u=1, i",
+            "Referer": f"https://ai-sdk-gateway-demo.labs.vercel.dev/?modelId={model.replace('/', '%2F')}",
+            "Sec-CH-UA": self.fingerprint.get("sec_ch_ua", ""),
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": f'"{self.fingerprint.get("platform", "")}"',
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "Sec-GPC": "1",
+            "User-Agent": self.fingerprint.get("user_agent", ""),
+            "X-Forwarded-For": self.fingerprint.get("x-forwarded-for", ""),
+            "X-Real-IP": self.fingerprint.get("x-real-ip", ""),
+            "X-Client-IP": self.fingerprint.get("x-client-ip", ""),
+        }
+
+        # Initialize curl_cffi Session
+        self.session = Session()
+        self.session.headers.update(self.headers)
+
+        # Configure proxy settings
+        self.use_proxy = use_proxy
+        self.max_proxy_attempts = max_proxy_attempts
+        self.proxy_cache_duration = proxy_cache_duration
+
+        # Integrate LitProxy for intelligent proxy management
+        if use_proxy and LITPROXY_AVAILABLE:
+            try:
+                # Configure proxy cache duration
+                set_proxy_cache_duration(proxy_cache_duration)
+                # Patch the session with proxy support
+                patch(self.session)
+                self.proxy_enabled = True
+            except Exception as e:
+                self.proxy_enabled = False
+        else:
+            self.proxy_enabled = False
+            if use_proxy and not LITPROXY_AVAILABLE:
+                # Silently disable proxy if LitProxy not available
+                pass
+
+        self.system_prompt = system_prompt
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def refresh_identity(self, browser: str = None):
+        """
+        Refreshes the browser identity fingerprint.
+
+        Args:
+            browser: Specific browser to use for the new fingerprint
+        """
+        browser = browser or self.fingerprint.get("browser_type", "chrome")
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Update headers with new fingerprint
+        self.headers.update({
+            "Accept-Language": self.fingerprint["accept_language"],
+            "User-Agent": self.fingerprint.get("user_agent", ""),
+            "Sec-CH-UA": self.fingerprint.get("sec_ch_ua", ""),
+            "Sec-CH-UA-Platform": f'"{self.fingerprint.get("platform", "")}"',
+        })
+
+        # Update session headers
+        self.session.headers.update(self.headers)
+        return self.fingerprint
+
+    def _make_request(self, payload: dict, stream: bool = False):
+        """
+        Make a request to the API. The session is already patched with LitProxy auto-retry if enabled.
+
+        Args:
+            payload: Request payload
+            stream: Whether to stream the response
+
+        Returns:
+            Response object
+        """
+        # Use the session directly - it's already patched with proxy auto-retry if enabled
+        response = self.session.post(
+            self.url,
+            data=json.dumps(payload),
+            stream=stream,
+            timeout=self.timeout,
+            impersonate="chrome110"
+        )
+        response.raise_for_status()
+        return response
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        # Generate random IDs
+        conversation_id = ''.join(random.choices(string.ascii_letters + string.digits, k=16))
+        message_id = ''.join(random.choices(string.ascii_letters + string.digits, k=16))
+
+        # Payload construction
+        payload = {
+            "modelId": self.model,
+            "id": conversation_id,
+            "messages": [
+                {
+                    "parts": [{"type": "text", "text": conversation_prompt}],
+                    "id": message_id,
+                    "role": "user"
+                }
+            ],
+            "trigger": "submit-message"
+        }
+
+        def for_stream():
+            streaming_text = ""
+            try:
+                response = self._make_request(payload, stream=True)
+
+                # Use sanitize_stream for SSE format
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None),
+                    intro_value="data:",
+                    to_json=True,
+                    skip_markers=["[DONE]"],
+                    content_extractor=self._vercel_extractor,
+                    yield_raw_on_error=False
+                )
+
+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        resp = dict(text=content_chunk)
+                        yield resp if not raw else content_chunk
+
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)}") from e
+            finally:
+                if streaming_text:
+                    self.last_response = {"text": streaming_text}
+                    self.conversation.update_chat_history(prompt, streaming_text)
+
+        def for_non_stream():
+            try:
+                response = self._make_request(payload, stream=False)
+
+                # Collect all streaming chunks for non-stream mode
+                full_text = ""
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None),
+                    intro_value="data:",
+                    to_json=True,
+                    skip_markers=["[DONE]"],
+                    content_extractor=self._vercel_extractor,
+                    yield_raw_on_error=False
+                )
+
+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str):
+                        full_text += content_chunk
+
+                self.last_response = {"text": full_text}
+                self.conversation.update_chat_history(prompt, full_text)
+                return self.last_response if not raw else full_text
+
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except Exception as e:
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e} - {err_text}") from e
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        def for_stream_chat():
+            gen = self.ask(
+                prompt, stream=True, raw=False,
+                optimizer=optimizer, conversationally=conversationally
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict)
+
+        def for_non_stream_chat():
+            response_data = self.ask(
+                prompt, stream=False, raw=False,
+                optimizer=optimizer, conversationally=conversationally
+            )
+            return self.get_message(response_data)
+
+        return for_stream_chat() if stream else for_non_stream_chat()
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+if __name__ == "__main__":
+    test_ai = VercelAIGateway(use_proxy=True, max_proxy_attempts=3, proxy_cache_duration=300)
+    print(test_ai.chat("Hello, how are you?"))
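
On the wire, the gateway answers with SSE lines such as data: {"type":"text-delta","delta":"Hel"}; sanitize_stream strips the data: prefix, JSON-decodes each event, and _vercel_extractor keeps only the text-delta payloads. Since the class follows the standard webscout Provider shape (ask/chat/get_message), streaming usage looks like any other provider. A minimal sketch, assuming the module must be imported by its file path because UNFINISHED providers are not re-exported from webscout.Provider:

    from webscout.Provider.UNFINISHED.VercelAIGateway import VercelAIGateway

    # use_proxy=False sidesteps the optional litproxy dependency entirely
    ai = VercelAIGateway(model="openai/gpt-4o-mini", use_proxy=False)
    for chunk in ai.chat("Summarize SSE in one sentence.", stream=True):
        print(chunk, end="", flush=True)
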
--- a/webscout/Provider/__init__.py
+++ b/webscout/Provider/__init__.py
@@ -39,7 +39,6 @@ from .Jadve import *
 from .chatglm import *
 from .hermes import *
 from .TextPollinationsAI import *
-from .Glider import *
 from .QwenLM import *
 from .granite import *
 from .WiseCat import *
@@ -74,7 +73,6 @@ from .WrDoChat import WrDoChat
 from .Nemotron import NEMOTRON
 from .FreeGemini import FreeGemini
 from .Flowith import Flowith
-from .samurai import samurai
 from .lmarena import lmarena
 from .oivscode import oivscode
 from .XenAI import XenAI
@@ -84,91 +82,5 @@ from .TogetherAI import TogetherAI
 from .MiniMax import MiniMax
 from .Qodo import *
 from .monochat import MonoChat
-__all__ = [
-    'SCNet',
-    'MonoChat',
-    'MiniMax',
-    'QodoAI',
-    'GeminiProxy',
-    'TogetherAI',
-    'oivscode',
-    'DeepSeekAssistant',
-    'lmarena',
-    'XenAI',
-    'NEMOTRON',
-    'Flowith',
-    'samurai',
-    'FreeGemini',
-    'WrDoChat',
-    'GizAI',
-    'ChatSandbox',
-    'SciraAI',
-    'StandardInputAI',
-    'OpenGPT',
-    'Venice',
-    'ExaAI',
-    'Copilot',
-    'TwoAI',
-    'HeckAI',
-    'AllenAI',
-    'PerplexityLabs',
-    'AkashGPT',
-    'WiseCat',
-    'IBMGranite',
-    'QwenLM',
-    'LambdaChat',
-    'TextPollinationsAI',
-    'GliderAI',
-    'Cohere',
-    'REKA',
-    'GROQ',
-    'AsyncGROQ',
-    'OPENAI',
-    'AsyncOPENAI',
-    'KOBOLDAI',
-    'AsyncKOBOLDAI',
-    'BLACKBOXAI',
-    'GEMINI',
-    'DeepInfra',
-    'AI4Chat',
-    'OLLAMA',
-    'AndiSearch',
-    'Sambanova',
-    'KOALA',
-    'Meta',
-    'PiAI',
-    'Julius',
-    'YEPCHAT',
-    'Cloudflare',
-    'TurboSeek',
-    'TeachAnything',
-    'X0GPT',
-    'Cerebras',
-    'GEMINIAPI',
-    'SonusAI',
-    'Cleeai',
-    'Elmo',
-    'ChatGPTClone',
-    'TypefullyAI',
-    'Netwrck',
-    'LLMChat',
-    'LLMChatCo',
-    'Talkai',
-    'Llama3Mitril',
-    'Marcus',
-    'Netwrck',
-    'MultiChatAI',
-    'JadveOpenAI',
-    'ChatGLM',
-    'NousHermes',
-    'FreeAIChat',
-    'GithubChat',
-    'VercelAI',
-    'ExaChat',
-    'AskSteve',
-    'Aitopia',
-    'SearchChatAI',
-    'Toolbaz',
-    'MCPCore',
-    'TypliAI',
-]
+from .Kimi import Kimi
+from .GptOss import GptOss
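
Two things happen in this final hunk: Kimi and GptOss become importable from webscout.Provider, and the hand-maintained __all__ list, which had already drifted (it still named samurai and GliderAI, both gone from the imports above), is dropped entirely. Without an explicit __all__, from webscout.Provider import * falls back to Python's default and exports every public name the module defines or imports:

    # New in 8.3.6: both providers are re-exported from webscout.Provider
    from webscout.Provider import Kimi, GptOss

    # With __all__ removed, star imports pull in all public names rather than
    # the curated list, so wildcard users may see more symbols than before.
    from webscout.Provider import *
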