webscout 8.3.4-py3-none-any.whl → 8.3.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIutel.py +52 -1016
- webscout/Bard.py +12 -6
- webscout/DWEBS.py +66 -57
- webscout/Provider/AISEARCH/PERPLEXED_search.py +214 -0
- webscout/Provider/AISEARCH/__init__.py +11 -10
- webscout/Provider/AISEARCH/felo_search.py +7 -3
- webscout/Provider/AISEARCH/scira_search.py +2 -0
- webscout/Provider/AISEARCH/stellar_search.py +53 -8
- webscout/Provider/Deepinfra.py +13 -1
- webscout/Provider/Flowith.py +6 -1
- webscout/Provider/GithubChat.py +1 -0
- webscout/Provider/GptOss.py +207 -0
- webscout/Provider/Kimi.py +445 -0
- webscout/Provider/Netwrck.py +3 -6
- webscout/Provider/OPENAI/README.md +2 -1
- webscout/Provider/OPENAI/TogetherAI.py +12 -8
- webscout/Provider/OPENAI/TwoAI.py +94 -1
- webscout/Provider/OPENAI/__init__.py +4 -4
- webscout/Provider/OPENAI/copilot.py +20 -4
- webscout/Provider/OPENAI/deepinfra.py +12 -0
- webscout/Provider/OPENAI/e2b.py +60 -8
- webscout/Provider/OPENAI/flowith.py +4 -3
- webscout/Provider/OPENAI/generate_api_key.py +48 -0
- webscout/Provider/OPENAI/gptoss.py +288 -0
- webscout/Provider/OPENAI/kimi.py +469 -0
- webscout/Provider/OPENAI/netwrck.py +8 -12
- webscout/Provider/OPENAI/refact.py +274 -0
- webscout/Provider/OPENAI/scirachat.py +4 -0
- webscout/Provider/OPENAI/textpollinations.py +11 -10
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/OPENAI/venice.py +1 -0
- webscout/Provider/Perplexitylabs.py +163 -147
- webscout/Provider/Qodo.py +30 -6
- webscout/Provider/TTI/__init__.py +1 -0
- webscout/Provider/TTI/bing.py +14 -2
- webscout/Provider/TTI/together.py +11 -9
- webscout/Provider/TTI/venice.py +368 -0
- webscout/Provider/TTS/README.md +0 -1
- webscout/Provider/TTS/__init__.py +0 -1
- webscout/Provider/TTS/base.py +479 -159
- webscout/Provider/TTS/deepgram.py +409 -156
- webscout/Provider/TTS/elevenlabs.py +425 -111
- webscout/Provider/TTS/freetts.py +317 -140
- webscout/Provider/TTS/gesserit.py +192 -128
- webscout/Provider/TTS/murfai.py +248 -113
- webscout/Provider/TTS/openai_fm.py +347 -129
- webscout/Provider/TTS/speechma.py +620 -586
- webscout/Provider/TextPollinationsAI.py +11 -10
- webscout/Provider/TogetherAI.py +12 -4
- webscout/Provider/TwoAI.py +96 -2
- webscout/Provider/TypliAI.py +33 -27
- webscout/Provider/UNFINISHED/VercelAIGateway.py +339 -0
- webscout/Provider/UNFINISHED/fetch_together_models.py +6 -11
- webscout/Provider/Venice.py +1 -0
- webscout/Provider/WiseCat.py +18 -20
- webscout/Provider/__init__.py +2 -96
- webscout/Provider/cerebras.py +83 -33
- webscout/Provider/copilot.py +42 -23
- webscout/Provider/scira_chat.py +4 -0
- webscout/Provider/toolbaz.py +6 -10
- webscout/Provider/typefully.py +1 -11
- webscout/__init__.py +3 -15
- webscout/auth/__init__.py +19 -4
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/auth_system.py +25 -40
- webscout/auth/config.py +105 -6
- webscout/auth/database.py +377 -22
- webscout/auth/models.py +185 -130
- webscout/auth/request_processing.py +175 -11
- webscout/auth/routes.py +99 -2
- webscout/auth/server.py +9 -2
- webscout/auth/simple_logger.py +236 -0
- webscout/conversation.py +22 -20
- webscout/sanitize.py +1078 -0
- webscout/scout/README.md +20 -23
- webscout/scout/core/crawler.py +125 -38
- webscout/scout/core/scout.py +26 -5
- webscout/version.py +1 -1
- webscout/webscout_search.py +13 -6
- webscout/webscout_search_async.py +10 -8
- webscout/yep_search.py +13 -5
- {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/METADATA +10 -149
- {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/RECORD +88 -87
- webscout/Provider/Glider.py +0 -225
- webscout/Provider/OPENAI/README_AUTOPROXY.md +0 -238
- webscout/Provider/OPENAI/c4ai.py +0 -394
- webscout/Provider/OPENAI/glider.py +0 -330
- webscout/Provider/OPENAI/typegpt.py +0 -368
- webscout/Provider/OPENAI/uncovrAI.py +0 -477
- webscout/Provider/TTS/sthir.py +0 -94
- webscout/Provider/WritingMate.py +0 -273
- webscout/Provider/typegpt.py +0 -284
- webscout/Provider/uncovr.py +0 -333
- /webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +0 -0
- {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/WHEEL +0 -0
- {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/top_level.txt +0 -0
webscout/Provider/Glider.py
DELETED
@@ -1,225 +0,0 @@
import cloudscraper
# from curl_cffi.requests import Session
import json
from typing import Union, Any, Dict, Generator, Optional, List

from curl_cffi import CurlError

from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
from webscout.AIbase import Provider
from webscout import exceptions
from webscout.litagent import LitAgent as Lit

class GliderAI(Provider):
    """
    A class to interact with the Glider.so API.
    """

    AVAILABLE_MODELS = [
        "chat-llama-3-1-8b",
        "chat-llama-3-2-3b",
        "chat-deepseek-r1-qwen-32b",
        "chat-qwen-2-5-7b",
        "chat-qwen-qwq-32b",
        "deepseek-ai/DeepSeek-R1",
    ]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 600,
        timeout: int = 30,
        intro: Optional[str] = None,
        filepath: Optional[str] = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: Optional[str] = None,
        model: str = "chat-llama-3-1-8b",
        system_prompt: str = "You are a helpful AI assistant."
    ):
        """Initializes the GliderAI API client."""
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {', '.join(self.AVAILABLE_MODELS)}")

        self.session = cloudscraper.create_scraper()  # Use cloudscraper Session
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://glider.so/api/chat"
        self.stream_chunk_size = 64
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.system_prompt = system_prompt
        self.headers = {
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            "content-type": "text/plain;charset=UTF-8",
            "origin": "https://glider.so",
            "referer": "https://glider.so/",
            "user-agent": Lit().random(),
            "cookie": "_vcrcs=1.1746977094.3600.NDlmNmM5YWFmNzMxZWUyNzE4ZjBhOTJlZGZlZDU3MGU=.850a77f5f36f60ae5da2f51b55231a54",
        }
        self.session.headers.update(self.headers)
        self.session.proxies = proxies  # Assign proxies directly

        self.__available_optimizers = (
            method for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )

        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    @staticmethod
    def _glider_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
        """Extracts content from Glider stream JSON objects."""
        if isinstance(chunk, dict):
            # Handle both standard and DeepSeek response formats within choices
            return chunk.get("choices", [{}])[0].get("delta", {}).get("content")
        return None

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: Optional[str] = None,
        conversationally: bool = False,
    ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
        """Chat with AI.

        Args:
            prompt (str): Prompt to be sent.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            raw (bool, optional): Return raw response chunks instead of dict. Defaults to False.
            optimizer (str, optional): Prompt optimizer name. Defaults to None.
            conversationally (bool, optional): Use conversationally modified prompt when optimizer specified. Defaults to False.
        Returns:
            dict or Generator[dict, None, None]: The response from the API.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {list(self.__available_optimizers)}")

        payload = {
            "messages": [
                {"role": "system", "content": self.system_prompt},
                {"role": "user", "content": conversation_prompt}
            ],
            "model": self.model,
        }

        def for_stream():
            streaming_text = ""
            try:
                import json
                response = self.session.post(
                    self.api_endpoint, data=json.dumps(payload), stream=True, timeout=self.timeout
                )
                response.raise_for_status()

                # Use sanitize_stream
                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=None),  # Pass byte iterator
                    intro_value="data:",
                    to_json=True,  # Stream sends JSON
                    content_extractor=self._glider_extractor,  # Use the specific extractor
                    yield_raw_on_error=False  # Skip non-JSON lines or lines where extractor fails
                )

                for content_chunk in processed_stream:
                    # content_chunk is the string extracted by _glider_extractor
                    if content_chunk and isinstance(content_chunk, str):
                        streaming_text += content_chunk
                        yield content_chunk if raw else {"text": content_chunk}
            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}") from e
            self.last_response.update(dict(text=streaming_text))
            self.conversation.update_chat_history(prompt, self.get_message(self.last_response))

        def for_non_stream():
            for _ in for_stream():
                pass
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: Optional[str] = None,
        conversationally: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        """Generate response as a string.

        Args:
            prompt (str): Prompt to be sent.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            optimizer (str, optional): Prompt optimizer name. Defaults to None.
            conversationally (bool, optional): Use conversationally modified prompt when optimizer specified. Defaults to False.
        Returns:
            str or Generator[str, None, None]: The response generated.
        """
        def for_stream():
            for response in self.ask(
                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)
        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt,
                    stream=False,
                    optimizer=optimizer,
                    conversationally=conversationally,
                )
            )
        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Retrieves message only from response."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]

if __name__ == "__main__":
    print("-" * 80)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print("-" * 80)

    for model in GliderAI.AVAILABLE_MODELS:
        try:
            test_ai = GliderAI(model=model, timeout=60)
            response = test_ai.chat("Say 'Hello' in one word", stream=True)
            response_text = ""
            for chunk in response:
                response_text += chunk
                print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)

            if response_text and len(response_text.strip()) > 0:
                status = "✓"
                # Truncate response if too long
                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
            else:
                status = "✗"
                display_text = "Empty or invalid response"
            print(f"\r{model:<50} {status:<10} {display_text}")
        except Exception as e:
            print(f"\r{model:<50} {'✗':<10} {str(e)}")
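For reference, the removed provider exposed the standard webscout `Provider` interface (`ask`, `chat`, `get_message`). A minimal usage sketch based on the deleted code above; it only works against webscout 8.3.4 or earlier, where `Provider/Glider.py` still exists:

```python
# Works only with webscout <= 8.3.4; the module is removed in 8.3.6.
from webscout.Provider.Glider import GliderAI

ai = GliderAI(model="chat-llama-3-1-8b", timeout=60)

# Non-streaming: chat() returns the full reply as a string
print(ai.chat("Say 'Hello' in one word"))

# Streaming: chat(stream=True) yields text chunks as they arrive
for chunk in ai.chat("Explain HTTP in one sentence", stream=True):
    print(chunk, end="", flush=True)
```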
webscout/Provider/OPENAI/README_AUTOPROXY.md
DELETED
@@ -1,238 +0,0 @@
# WebScout Auto-Proxy System

The WebScout Auto-Proxy system provides automatic proxy injection for all OpenAI-compatible providers. This system fetches proxies from a remote source and automatically configures them for HTTP sessions.

## Features

- **Automatic Proxy Injection**: All OpenAI-compatible providers automatically get proxy support
- **Multiple HTTP Client Support**: Works with `requests`, `httpx`, and `curl_cffi`
- **Proxy Pool Management**: Automatically fetches and caches proxies from remote source
- **Working Proxy Detection**: Tests proxies to find working ones
- **Easy Disable Option**: Can be disabled per provider instance or globally

## How It Works

The system uses a metaclass (`ProxyAutoMeta`) that automatically:

1. Fetches proxies from `http://207.180.209.185:5000/ips.txt`
2. Caches proxies for 5 minutes to avoid excessive requests
3. Randomly selects a proxy for each provider instance
4. Patches existing HTTP session objects with proxy configuration
5. Provides helper methods for creating proxied sessions
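The metaclass itself lives in the autoproxy module and is not part of this diff. As a rough illustration of the flow described in the five steps above, here is a minimal sketch (proxy caching omitted for brevity; aside from `ProxyAutoMeta`, `DISABLE_AUTO_PROXY`, `disable_auto_proxy`, and the proxy URL, all names and details are assumptions, not the real implementation):

```python
# Illustrative sketch only -- not the actual autoproxy implementation.
import random
import requests

PROXY_SOURCE = "http://207.180.209.185:5000/ips.txt"

def _fetch_proxies():
    """Download the proxy list; return an empty list on failure so callers can fall back."""
    try:
        resp = requests.get(PROXY_SOURCE, timeout=10)
        resp.raise_for_status()
        return [line.strip() for line in resp.text.splitlines() if line.strip()]
    except requests.RequestException:
        return []

class ProxyAutoMeta(type):
    """Attach a randomly chosen proxy to every new provider instance."""
    def __call__(cls, *args, **kwargs):
        disable = kwargs.pop("disable_auto_proxy", getattr(cls, "DISABLE_AUTO_PROXY", False))
        instance = super().__call__(*args, **kwargs)
        if not disable and not getattr(instance, "proxies", None):
            pool = _fetch_proxies()
            if pool:
                chosen = random.choice(pool)
                instance.proxies = {"http": chosen, "https": chosen}
                session = getattr(instance, "session", None)
                if session is not None and hasattr(session, "proxies"):
                    session.proxies = instance.proxies  # patch an already-created session
        return instance
```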
## Usage

### Automatic Usage (Default)

All OpenAI-compatible providers automatically get proxy support:

```python
from webscout.Provider.OPENAI.yep import YEPCHAT

# Proxy is automatically configured
client = YEPCHAT()

# All requests will use the configured proxy
response = client.chat.completions.create(
    model="DeepSeek-R1-Distill-Qwen-32B",
    messages=[{"role": "user", "content": "Hello!"}]
)
```

### Disabling Auto-Proxy

You can disable automatic proxy injection:

```python
# Disable for a specific instance
client = YEPCHAT(disable_auto_proxy=True)

# Or set a class attribute to disable for all instances
class MyProvider(OpenAICompatibleProvider):
    DISABLE_AUTO_PROXY = True
```

### Manual Proxy Configuration

You can also provide your own proxies:

```python
custom_proxies = {
    'http': 'http://user:pass@proxy.example.com:8080',
    'https': 'http://user:pass@proxy.example.com:8080'
}

client = YEPCHAT(proxies=custom_proxies)
```

### Using Helper Methods

Each provider instance gets helper methods for creating proxied sessions:

```python
client = YEPCHAT()

# Get a requests.Session with proxies configured
session = client.get_proxied_session()

# Get a curl_cffi Session with proxies configured
curl_session = client.get_proxied_curl_session(impersonate="chrome120")

# Get an httpx.Client with proxies configured (if httpx is installed)
httpx_client = client.get_proxied_httpx_client()
```

## Direct API Usage

You can also use the proxy functions directly:

```python
from webscout.Provider.OPENAI.autoproxy import (
    get_auto_proxy,
    get_proxy_dict,
    get_working_proxy,
    test_proxy,
    get_proxy_stats
)

# Get a random proxy
proxy = get_auto_proxy()

# Get proxy in dictionary format
proxy_dict = get_proxy_dict()

# Find a working proxy (tests multiple proxies)
working_proxy = get_working_proxy(max_attempts=5)

# Test if a proxy is working
is_working = test_proxy(proxy)

# Get proxy cache statistics
stats = get_proxy_stats()
```

## Proxy Format

The system expects proxies in the format:
```
http://username:password@host:port
```

Example:
```
http://fnXlN8NP6StpxZkxmNLyOt2MaVLQunpGC7K96j7R0KbnE5sU_2RdYRxaoy7P2yfqrD7Y8UFexv8kpTyK0LwkDQ==:fnXlN8NP6StpxZkxmNLyOt2MaVLQunpGC7K96j7R0KbnE5sU_2RdYRxaoy7P2yfqrD7Y8UFexv8kpTyK0LwkDQ==@190.103.177.163:80
```

## Configuration

### Cache Duration

You can adjust the proxy cache duration:

```python
from webscout.Provider.OPENAI.autoproxy import set_proxy_cache_duration

# Set cache to 10 minutes
set_proxy_cache_duration(600)
```

### Force Refresh

You can force refresh the proxy cache:

```python
from webscout.Provider.OPENAI.autoproxy import refresh_proxy_cache

# Force refresh and get number of proxies loaded
count = refresh_proxy_cache()
print(f"Loaded {count} proxies")
```

## Error Handling

The system gracefully handles errors:

- If proxy fetching fails, providers work without proxies
- If a proxy test fails, the system tries other proxies
- If no working proxy is found, providers fall back to direct connections
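As a concrete (hypothetical) usage pattern for that fallback behaviour, built only from the public helpers named earlier in this README and assuming `get_proxy_dict()` returns an empty mapping when no proxy is available:

```python
import requests
from webscout.Provider.OPENAI.autoproxy import get_proxy_dict

def fetch_with_fallback(url: str, timeout: int = 10) -> requests.Response:
    proxies = get_proxy_dict()  # assumed empty when the proxy source is unreachable
    try:
        # First attempt goes through the auto-selected proxy, if any
        return requests.get(url, proxies=proxies or None, timeout=timeout)
    except requests.RequestException:
        # No working proxy: fall back to a direct connection
        return requests.get(url, timeout=timeout)
```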
## Logging

The system uses Python's logging module. To see proxy-related logs:

```python
import logging
logging.basicConfig(level=logging.INFO)

# Or specifically for the autoproxy module
logger = logging.getLogger('webscout.Provider.OPENAI.autoproxy')
logger.setLevel(logging.DEBUG)
```

## Testing

Run the test suite to verify functionality:

```bash
python webscout/Provider/OPENAI/test_autoproxy.py
```

## Implementation Details

### ProxyAutoMeta Metaclass

The `ProxyAutoMeta` metaclass is applied to `OpenAICompatibleProvider` and:

1. Intercepts class instantiation
2. Checks for `disable_auto_proxy` parameter or class attribute
3. Fetches and configures proxies if not disabled
4. Patches existing session objects
5. Adds helper methods to the instance

### Session Patching

The system automatically patches these session types:
- `requests.Session` - Updates the `proxies` attribute
- `httpx.Client` - Sets the `_proxies` attribute
- `curl_cffi.Session` - Updates the `proxies` attribute
- `curl_cffi.AsyncSession` - Updates the `proxies` attribute
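A minimal sketch of that patching step (illustrative only; the function name is an assumption and the real helper may differ):

```python
def patch_session_proxies(session, proxies: dict) -> None:
    """Apply a proxy mapping to whatever HTTP session object a provider holds."""
    if hasattr(session, "proxies"):
        # requests.Session and curl_cffi Session/AsyncSession expose a public attribute
        session.proxies = proxies
    elif hasattr(session, "_proxies"):
        # httpx.Client keeps its mapping privately
        session._proxies = proxies
```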
### Proxy Source

Proxies are fetched from: `http://207.180.209.185:5000/ips.txt`

The system expects one proxy per line in the format shown above.

## Troubleshooting

### No Proxies Available

If you see "No proxies available" messages:
1. Check if the proxy source URL is accessible
2. Verify your internet connection
3. Check if the proxy format is correct

### Proxy Test Failures

If proxy tests fail:
1. Some proxies may be temporarily unavailable (normal)
2. The test URL (`https://httpbin.org/ip`) may be blocked
3. Network connectivity issues

### Provider Not Getting Proxies

If a provider doesn't get automatic proxies:
1. Ensure it inherits from `OpenAICompatibleProvider`
2. Check if `disable_auto_proxy` is set
3. Verify the metaclass is properly imported

## Contributing

To add proxy support to a new provider:

1. Inherit from `OpenAICompatibleProvider`
2. Accept `disable_auto_proxy` parameter in `__init__`
3. Use `self.proxies` for HTTP requests
4. Optionally use helper methods like `self.get_proxied_session()`

The metaclass will handle the rest automatically!
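Putting those four contribution steps together, a hypothetical provider skeleton (the `OpenAICompatibleProvider` import path and base-class initialisation are assumptions; only the attribute and helper names come from this README):

```python
import requests
from webscout.Provider.OPENAI.base import OpenAICompatibleProvider  # import path assumed

class MyProvider(OpenAICompatibleProvider):
    def __init__(self, disable_auto_proxy: bool = False, proxies: dict = None, **kwargs):
        # Base-class setup omitted for brevity.
        # Step 2: accept disable_auto_proxy so ProxyAutoMeta can honour it.
        self.proxies = proxies or {}
        # Step 3: route every request through self.proxies (filled in by the metaclass).
        self.session = requests.Session()
        self.session.proxies = self.proxies
        # Step 4 (optional): self.get_proxied_session() could be used here instead.

    def ping(self) -> int:
        # Any call made through the session now uses the injected proxy, if one was set.
        return self.session.get("https://httpbin.org/ip", timeout=10).status_code
```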