webscout-8.3.4-py3-none-any.whl → webscout-8.3.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIutel.py +52 -1016
- webscout/Provider/AISEARCH/__init__.py +11 -10
- webscout/Provider/AISEARCH/felo_search.py +7 -3
- webscout/Provider/AISEARCH/scira_search.py +2 -0
- webscout/Provider/AISEARCH/stellar_search.py +53 -8
- webscout/Provider/Deepinfra.py +7 -1
- webscout/Provider/OPENAI/TogetherAI.py +57 -48
- webscout/Provider/OPENAI/TwoAI.py +94 -1
- webscout/Provider/OPENAI/__init__.py +0 -2
- webscout/Provider/OPENAI/deepinfra.py +6 -0
- webscout/Provider/OPENAI/scirachat.py +4 -0
- webscout/Provider/OPENAI/textpollinations.py +11 -7
- webscout/Provider/OPENAI/venice.py +1 -0
- webscout/Provider/Perplexitylabs.py +163 -147
- webscout/Provider/Qodo.py +30 -6
- webscout/Provider/TTI/__init__.py +1 -0
- webscout/Provider/TTI/together.py +7 -6
- webscout/Provider/TTI/venice.py +368 -0
- webscout/Provider/TextPollinationsAI.py +11 -7
- webscout/Provider/TogetherAI.py +57 -44
- webscout/Provider/TwoAI.py +96 -2
- webscout/Provider/TypliAI.py +33 -27
- webscout/Provider/UNFINISHED/PERPLEXED_search.py +254 -0
- webscout/Provider/UNFINISHED/fetch_together_models.py +6 -11
- webscout/Provider/Venice.py +1 -0
- webscout/Provider/WiseCat.py +18 -20
- webscout/Provider/__init__.py +0 -6
- webscout/Provider/scira_chat.py +4 -0
- webscout/Provider/toolbaz.py +5 -10
- webscout/Provider/typefully.py +1 -11
- webscout/__init__.py +3 -15
- webscout/auth/__init__.py +19 -4
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/auth_system.py +25 -40
- webscout/auth/config.py +105 -6
- webscout/auth/database.py +377 -22
- webscout/auth/models.py +185 -130
- webscout/auth/request_processing.py +175 -11
- webscout/auth/routes.py +99 -2
- webscout/auth/server.py +9 -2
- webscout/auth/simple_logger.py +236 -0
- webscout/sanitize.py +1074 -0
- webscout/version.py +1 -1
- {webscout-8.3.4.dist-info → webscout-8.3.5.dist-info}/METADATA +9 -149
- {webscout-8.3.4.dist-info → webscout-8.3.5.dist-info}/RECORD +49 -51
- webscout/Provider/OPENAI/README_AUTOPROXY.md +0 -238
- webscout/Provider/OPENAI/typegpt.py +0 -368
- webscout/Provider/OPENAI/uncovrAI.py +0 -477
- webscout/Provider/WritingMate.py +0 -273
- webscout/Provider/typegpt.py +0 -284
- webscout/Provider/uncovr.py +0 -333
- {webscout-8.3.4.dist-info → webscout-8.3.5.dist-info}/WHEEL +0 -0
- {webscout-8.3.4.dist-info → webscout-8.3.5.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.4.dist-info → webscout-8.3.5.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.4.dist-info → webscout-8.3.5.dist-info}/top_level.txt +0 -0
--- webscout/Provider/OPENAI/README_AUTOPROXY.md
+++ /dev/null
@@ -1,238 +0,0 @@
# WebScout Auto-Proxy System

The WebScout Auto-Proxy system provides automatic proxy injection for all OpenAI-compatible providers. This system fetches proxies from a remote source and automatically configures them for HTTP sessions.

## Features

- **Automatic Proxy Injection**: All OpenAI-compatible providers automatically get proxy support
- **Multiple HTTP Client Support**: Works with `requests`, `httpx`, and `curl_cffi`
- **Proxy Pool Management**: Automatically fetches and caches proxies from a remote source
- **Working Proxy Detection**: Tests proxies to find working ones
- **Easy Disable Option**: Can be disabled per provider instance or globally

## How It Works

The system uses a metaclass (`ProxyAutoMeta`) that automatically:

1. Fetches proxies from `http://207.180.209.185:5000/ips.txt`
2. Caches proxies for 5 minutes to avoid excessive requests
3. Randomly selects a proxy for each provider instance
4. Patches existing HTTP session objects with the proxy configuration
5. Provides helper methods for creating proxied sessions
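Steps 1–3 amount to a small fetch-and-cache loop. A minimal sketch of that logic (illustrative only, not the actual `autoproxy.py` code; names and error handling are simplified):

```python
import random
import time
from typing import Optional

import requests

PROXY_SOURCE = "http://207.180.209.185:5000/ips.txt"
CACHE_TTL = 300  # seconds; matches the 5-minute cache described above

_cache = {"proxies": [], "fetched_at": 0.0}

def pick_random_proxy() -> Optional[str]:
    """Refresh the proxy list when the cache expires, then pick one at random."""
    if time.time() - _cache["fetched_at"] > CACHE_TTL:
        try:
            resp = requests.get(PROXY_SOURCE, timeout=10)
            resp.raise_for_status()
            _cache["proxies"] = [line.strip() for line in resp.text.splitlines() if line.strip()]
            _cache["fetched_at"] = time.time()
        except requests.RequestException:
            pass  # keep any stale entries; providers fall back to direct connections
    return random.choice(_cache["proxies"]) if _cache["proxies"] else None
```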
## Usage

### Automatic Usage (Default)

All OpenAI-compatible providers automatically get proxy support:

```python
from webscout.Provider.OPENAI.yep import YEPCHAT

# Proxy is automatically configured
client = YEPCHAT()

# All requests will use the configured proxy
response = client.chat.completions.create(
    model="DeepSeek-R1-Distill-Qwen-32B",
    messages=[{"role": "user", "content": "Hello!"}]
)
```
### Disabling Auto-Proxy

You can disable automatic proxy injection:

```python
# Disable for a specific instance
client = YEPCHAT(disable_auto_proxy=True)

# Or set a class attribute to disable it for all instances
class MyProvider(OpenAICompatibleProvider):
    DISABLE_AUTO_PROXY = True
```
### Manual Proxy Configuration

You can also provide your own proxies:

```python
custom_proxies = {
    'http': 'http://user:pass@proxy.example.com:8080',
    'https': 'http://user:pass@proxy.example.com:8080'
}

client = YEPCHAT(proxies=custom_proxies)
```
### Using Helper Methods

Each provider instance gets helper methods for creating proxied sessions:

```python
client = YEPCHAT()

# Get a requests.Session with proxies configured
session = client.get_proxied_session()

# Get a curl_cffi Session with proxies configured
curl_session = client.get_proxied_curl_session(impersonate="chrome120")

# Get an httpx.Client with proxies configured (if httpx is installed)
httpx_client = client.get_proxied_httpx_client()
```
## Direct API Usage

You can also use the proxy functions directly:

```python
from webscout.Provider.OPENAI.autoproxy import (
    get_auto_proxy,
    get_proxy_dict,
    get_working_proxy,
    test_proxy,
    get_proxy_stats
)

# Get a random proxy
proxy = get_auto_proxy()

# Get a proxy in dictionary format
proxy_dict = get_proxy_dict()

# Find a working proxy (tests multiple proxies)
working_proxy = get_working_proxy(max_attempts=5)

# Test whether a proxy is working
is_working = test_proxy(proxy)

# Get proxy cache statistics
stats = get_proxy_stats()
```
## Proxy Format

The system expects proxies in the format:

```
http://username:password@host:port
```

Example:

```
http://fnXlN8NP6StpxZkxmNLyOt2MaVLQunpGC7K96j7R0KbnE5sU_2RdYRxaoy7P2yfqrD7Y8UFexv8kpTyK0LwkDQ==:fnXlN8NP6StpxZkxmNLyOt2MaVLQunpGC7K96j7R0KbnE5sU_2RdYRxaoy7P2yfqrD7Y8UFexv8kpTyK0LwkDQ==@190.103.177.163:80
```
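Each line from the source maps directly onto the dictionary format that `requests`, `httpx`, and `curl_cffi` all accept. A tiny hypothetical helper:

```python
def proxy_line_to_dict(line: str) -> dict:
    """Turn one 'http://user:pass@host:port' line into a requests-style proxies dict."""
    proxy = line.strip()
    return {"http": proxy, "https": proxy}

proxies = proxy_line_to_dict("http://username:password@host:port")
# -> {'http': 'http://username:password@host:port',
#     'https': 'http://username:password@host:port'}
```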
## Configuration

### Cache Duration

You can adjust the proxy cache duration:

```python
from webscout.Provider.OPENAI.autoproxy import set_proxy_cache_duration

# Set the cache to 10 minutes
set_proxy_cache_duration(600)
```

### Force Refresh

You can force a refresh of the proxy cache:

```python
from webscout.Provider.OPENAI.autoproxy import refresh_proxy_cache

# Force a refresh and get the number of proxies loaded
count = refresh_proxy_cache()
print(f"Loaded {count} proxies")
```
## Error Handling

The system gracefully handles errors:

- If proxy fetching fails, providers work without proxies
- If a proxy test fails, the system tries other proxies
- If no working proxy is found, providers fall back to direct connections
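The try-and-discard behaviour behind `test_proxy` and `get_working_proxy` can be pictured as below. This is a rough sketch under assumed signatures, not the shipped implementation:

```python
from typing import Iterable, Optional

import requests

TEST_URL = "https://httpbin.org/ip"  # the test endpoint noted under Troubleshooting

def check_proxy(proxy: str, timeout: int = 10) -> bool:
    """Return True if a round trip through the proxy succeeds."""
    try:
        resp = requests.get(TEST_URL, proxies={"http": proxy, "https": proxy}, timeout=timeout)
        return resp.ok
    except requests.RequestException:
        return False

def first_working_proxy(candidates: Iterable[str], max_attempts: int = 5) -> Optional[str]:
    """Try candidates in order; return the first working proxy, or None."""
    for attempt, proxy in enumerate(candidates):
        if attempt >= max_attempts:
            break
        if check_proxy(proxy):
            return proxy
    return None  # caller falls back to a direct connection
```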
## Logging

The system uses Python's logging module. To see proxy-related logs:

```python
import logging
logging.basicConfig(level=logging.INFO)

# Or specifically for the autoproxy module
logger = logging.getLogger('webscout.Provider.OPENAI.autoproxy')
logger.setLevel(logging.DEBUG)
```
## Testing

Run the test suite to verify functionality:

```bash
python webscout/Provider/OPENAI/test_autoproxy.py
```
## Implementation Details

### ProxyAutoMeta Metaclass

The `ProxyAutoMeta` metaclass is applied to `OpenAICompatibleProvider` and:

1. Intercepts class instantiation
2. Checks for the `disable_auto_proxy` parameter or class attribute
3. Fetches and configures proxies if not disabled
4. Patches existing session objects
5. Adds helper methods to the instance
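Condensed to its essentials, such a metaclass hooks `__call__` around instantiation. The sketch below is illustrative only; `get_proxy_dict` here is a stand-in and the shipped `ProxyAutoMeta` does more:

```python
import requests

def get_proxy_dict() -> dict:
    """Hypothetical stand-in for the proxy-pool lookup."""
    proxy = "http://user:pass@proxy.example.com:8080"
    return {"http": proxy, "https": proxy}

class ProxyAutoMeta(type):
    def __call__(cls, *args, **kwargs):
        instance = super().__call__(*args, **kwargs)               # 1. intercept instantiation
        disabled = (kwargs.get("disable_auto_proxy", False)
                    or getattr(cls, "DISABLE_AUTO_PROXY", False))  # 2. check the opt-outs
        if not disabled:
            instance.proxies = getattr(instance, "proxies", None) or get_proxy_dict()  # 3.
            session = getattr(instance, "session", None)
            if isinstance(session, requests.Session):
                session.proxies.update(instance.proxies)           # 4. patch the live session
        return instance

class DemoProvider(metaclass=ProxyAutoMeta):
    def __init__(self, disable_auto_proxy: bool = False):
        self.session = requests.Session()

print(DemoProvider().session.proxies)  # proxies injected without any provider code
```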
### Session Patching

The system automatically patches these session types:

- `requests.Session` - updates the `proxies` attribute
- `httpx.Client` - sets the `_proxies` attribute
- `curl_cffi.Session` - updates the `proxies` attribute
- `curl_cffi.AsyncSession` - updates the `proxies` attribute
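In code, that dispatch can be as small as an attribute check. A hypothetical version:

```python
def patch_session_proxies(session, proxies: dict) -> None:
    """Apply a proxies dict to whichever supported session type was passed in (sketch)."""
    if hasattr(session, "proxies"):       # requests.Session, curl_cffi Session/AsyncSession
        session.proxies = {**(session.proxies or {}), **proxies}
    elif hasattr(session, "_proxies"):    # httpx.Client keeps proxies in a private attribute
        session._proxies = proxies
```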
### Proxy Source

Proxies are fetched from `http://207.180.209.185:5000/ips.txt`.

The system expects one proxy per line in the format shown above.

## Troubleshooting

### No Proxies Available

If you see "No proxies available" messages:

1. Check if the proxy source URL is accessible
2. Verify your internet connection
3. Check if the proxy format is correct

### Proxy Test Failures

If proxy tests fail:

1. Some proxies may be temporarily unavailable (normal)
2. The test URL (`https://httpbin.org/ip`) may be blocked
3. There may be network connectivity issues

### Provider Not Getting Proxies

If a provider doesn't get automatic proxies:

1. Ensure it inherits from `OpenAICompatibleProvider`
2. Check if `disable_auto_proxy` is set
3. Verify the metaclass is properly imported
## Contributing

To add proxy support to a new provider:

1. Inherit from `OpenAICompatibleProvider`
2. Accept a `disable_auto_proxy` parameter in `__init__`
3. Use `self.proxies` for HTTP requests
4. Optionally use helper methods like `self.get_proxied_session()`

The metaclass will handle the rest automatically!
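A skeletal provider following those steps might look like this (illustrative; the real base class may require additional members):

```python
import requests
from webscout.Provider.OPENAI.base import OpenAICompatibleProvider

class MyNewProvider(OpenAICompatibleProvider):              # 1. inherit from the base class
    def __init__(self, disable_auto_proxy: bool = False):  # 2. accept the opt-out flag
        self.session = requests.Session()

    def fetch(self, url: str):
        # 3. self.proxies is populated by ProxyAutoMeta when auto-proxy is enabled
        return self.session.get(url, proxies=getattr(self, "proxies", None))
```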
--- webscout/Provider/OPENAI/typegpt.py
+++ /dev/null
@@ -1,368 +0,0 @@
import time
import uuid
import requests
import json
from typing import List, Dict, Optional, Union, Generator, Any

# Import base classes and utility structures
from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
from .utils import (
    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
    ChatCompletionMessage, CompletionUsage, count_tokens
)

# Attempt to import LitAgent; fall back if it is not available
try:
    from webscout.litagent import LitAgent
except ImportError:
    print("Warning: LitAgent not found. Functionality may be limited.")

# --- TypeGPT Client ---

class Completions(BaseCompletions):
    def __init__(self, client: 'TypeGPT'):
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        presence_penalty: Optional[float] = None,
        frequency_penalty: Optional[float] = None,
        timeout: Optional[int] = None,
        proxies: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Creates a model response for the given chat conversation.
        Mimics openai.chat.completions.create.
        """
        # Prepare the payload for the TypeGPT API
        payload = {
            "messages": messages,
            "stream": stream,
            "model": self._client.convert_model_name(model),
            "temperature": temperature if temperature is not None else self._client.temperature,
            "top_p": top_p if top_p is not None else self._client.top_p,
            "presence_penalty": presence_penalty if presence_penalty is not None else self._client.presence_penalty,
            "frequency_penalty": frequency_penalty if frequency_penalty is not None else self._client.frequency_penalty,
            "max_tokens": max_tokens if max_tokens is not None else self._client.max_tokens,
        }

        # Add any additional parameters
        for key, value in kwargs.items():
            if key not in payload:
                payload[key] = value

        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        if stream:
            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
        else:
            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)

    def _create_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
    ) -> Generator[ChatCompletionChunk, None, None]:
        try:
            response = self._client.session.post(
                self._client.api_endpoint,
                headers=self._client.headers,
                json=payload,
                stream=True,
                timeout=timeout or self._client.timeout,
                proxies=proxies or getattr(self._client, "proxies", None)
            )

            # Handle non-200 responses
            if not response.ok:
                raise IOError(
                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                )

            # Track token usage across chunks
            prompt_tokens = 0
            completion_tokens = 0
            total_tokens = 0

            # Estimate prompt tokens based on message length
            for msg in payload.get("messages", []):
                prompt_tokens += count_tokens(msg.get("content", ""))

            for line in response.iter_lines():
                if not line:
                    continue

                decoded_line = line.decode('utf-8').strip()

                if decoded_line.startswith("data: "):
                    json_str = decoded_line[6:]
                    if json_str == "[DONE]":
                        break

                    try:
                        data = json.loads(json_str)
                        choice_data = data.get('choices', [{}])[0]
                        delta_data = choice_data.get('delta', {})
                        finish_reason = choice_data.get('finish_reason')

                        # Update token counts if available
                        usage_data = data.get('usage', {})
                        if usage_data:
                            prompt_tokens = usage_data.get('prompt_tokens', prompt_tokens)
                            completion_tokens = usage_data.get('completion_tokens', completion_tokens)
                            total_tokens = usage_data.get('total_tokens', total_tokens)

                        # Create the delta object
                        delta = ChoiceDelta(
                            content=delta_data.get('content'),
                            role=delta_data.get('role'),
                            tool_calls=delta_data.get('tool_calls')
                        )

                        # Create the choice object
                        choice = Choice(
                            index=choice_data.get('index', 0),
                            delta=delta,
                            finish_reason=finish_reason,
                            logprobs=choice_data.get('logprobs')
                        )

                        # Create the chunk object
                        chunk = ChatCompletionChunk(
                            id=request_id,
                            choices=[choice],
                            created=created_time,
                            model=model,
                            system_fingerprint=data.get('system_fingerprint')
                        )

                        # Convert the chunk to a dict using Pydantic's API
                        if hasattr(chunk, "model_dump"):
                            chunk_dict = chunk.model_dump(exclude_none=True)
                        else:
                            chunk_dict = chunk.dict(exclude_none=True)

                        # Add usage information to match the OpenAI format
                        usage_dict = {
                            "prompt_tokens": prompt_tokens or 10,
                            "completion_tokens": completion_tokens or (len(delta_data.get('content', '')) if delta_data.get('content') else 0),
                            "total_tokens": total_tokens or (10 + (len(delta_data.get('content', '')) if delta_data.get('content') else 0)),
                            "estimated_cost": None
                        }

                        # Update completion_tokens and total_tokens as more content arrives
                        if delta_data.get('content'):
                            completion_tokens += 1
                            total_tokens = prompt_tokens + completion_tokens
                            usage_dict["completion_tokens"] = completion_tokens
                            usage_dict["total_tokens"] = total_tokens

                        chunk_dict["usage"] = usage_dict

                        # Return the chunk object for internal processing
                        yield chunk
                    except json.JSONDecodeError:
                        print(f"Warning: Could not decode JSON line: {json_str}")
                        continue

            # Final chunk with finish_reason="stop"
            delta = ChoiceDelta(
                content=None,
                role=None,
                tool_calls=None
            )

            choice = Choice(
                index=0,
                delta=delta,
                finish_reason="stop",
                logprobs=None
            )

            chunk = ChatCompletionChunk(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                system_fingerprint=None
            )

            if hasattr(chunk, "model_dump"):
                chunk_dict = chunk.model_dump(exclude_none=True)
            else:
                chunk_dict = chunk.dict(exclude_none=True)
            chunk_dict["usage"] = {
                "prompt_tokens": prompt_tokens,
                "completion_tokens": completion_tokens,
                "total_tokens": total_tokens,
                "estimated_cost": None
            }

            yield chunk

        except Exception as e:
            print(f"Error during TypeGPT stream request: {e}")
            raise IOError(f"TypeGPT request failed: {e}") from e

    def _create_non_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
    ) -> ChatCompletion:
        try:
            response = self._client.session.post(
                self._client.api_endpoint,
                headers=self._client.headers,
                json=payload,
                timeout=timeout or self._client.timeout,
                proxies=proxies or getattr(self._client, "proxies", None)
            )

            # Handle non-200 responses
            if not response.ok:
                raise IOError(
                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                )

            # Parse the response
            data = response.json()

            choices_data = data.get('choices', [])
            usage_data = data.get('usage', {})

            choices = []
            for choice_d in choices_data:
                message_d = choice_d.get('message', {})
                message = ChatCompletionMessage(
                    role=message_d.get('role', 'assistant'),
                    content=message_d.get('content', '')
                )
                choice = Choice(
                    index=choice_d.get('index', 0),
                    message=message,
                    finish_reason=choice_d.get('finish_reason', 'stop')
                )
                choices.append(choice)

            usage = CompletionUsage(
                prompt_tokens=usage_data.get('prompt_tokens', 0),
                completion_tokens=usage_data.get('completion_tokens', 0),
                total_tokens=usage_data.get('total_tokens', 0)
            )

            completion = ChatCompletion(
                id=request_id,
                choices=choices,
                created=created_time,
                model=data.get('model', model),
                usage=usage,
            )
            return completion

        except Exception as e:
            print(f"Error during TypeGPT non-stream request: {e}")
            raise IOError(f"TypeGPT request failed: {e}") from e

class Chat(BaseChat):
    def __init__(self, client: 'TypeGPT'):
        self.completions = Completions(client)

class TypeGPT(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for the TypeGPT API.

    Usage:
        client = TypeGPT()
        response = client.chat.completions.create(
            model="gpt-4o",
            messages=[{"role": "user", "content": "Hello!"}]
        )
    """

    AVAILABLE_MODELS = [
        # Working models (based on testing)
        "gpt-4o-mini",
        "chatgpt-4o-latest",
        # "deepseek-r1",
        "deepseek-v3",
        # "uncensored-r1",
        # "Image-Generator",
    ]

    def __init__(
        self,
        timeout: Optional[int] = None,
        browser: str = "chrome"
    ):
        """
        Initialize the TypeGPT client.

        Args:
            timeout: Request timeout in seconds (defaults to 60 when None)
            browser: Browser to emulate in the user agent
        """
        self.timeout = timeout or 60  # Default to 60 seconds if None
        self.api_endpoint = "https://chat.typegpt.net/api/openai/v1/chat/completions"
        self.session = requests.Session()

        # Default parameters
        self.max_tokens = 4000
        self.temperature = 0.5
        self.presence_penalty = 0
        self.frequency_penalty = 0
        self.top_p = 1

        # Initialize LitAgent for user-agent generation
        agent = LitAgent()
        self.fingerprint = agent.generate_fingerprint(browser)

        # Headers for the request
        self.headers = {
            "authority": "chat.typegpt.net",
            "accept": "application/json, text/event-stream",
            "accept-language": self.fingerprint["accept_language"],
            "content-type": "application/json",
            "origin": "https://chat.typegpt.net",
            "referer": "https://chat.typegpt.net/",
            "user-agent": self.fingerprint["user_agent"]
        }

        self.session.headers.update(self.headers)

        # Initialize the chat interface
        self.chat = Chat(self)

    def convert_model_name(self, model: str) -> str:
        """
        Convert model names to ones supported by TypeGPT.

        Args:
            model: Model name to convert

        Returns:
            TypeGPT model name
        """
        # If the model is already a valid TypeGPT model, return it
        if model in self.AVAILABLE_MODELS:
            return model

        # Default to chatgpt-4o-latest if the model is not found (it works reliably)
        print(f"Warning: Unknown model '{model}'. Using 'chatgpt-4o-latest' instead.")
        return "chatgpt-4o-latest"

    @property
    def models(self):
        """Expose the available models through an OpenAI-style `models.list()` interface."""
        class _ModelList:
            def list(inner_self):
                return type(self).AVAILABLE_MODELS
        return _ModelList()
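For reference, the removed client's streaming path was driven as below. This is a sketch based on the code above and on the class docstring; the module itself no longer exists as of 8.3.5:

```python
from webscout.Provider.OPENAI.typegpt import TypeGPT  # import path prior to 8.3.5

client = TypeGPT(timeout=30)
for chunk in client.chat.completions.create(
    model="chatgpt-4o-latest",
    messages=[{"role": "user", "content": "Hello!"}],
    stream=True,  # _create_stream yields ChatCompletionChunk objects
):
    print(chunk)
```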