webscout 8.3.3__py3-none-any.whl → 8.3.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.
Files changed (79)
  1. webscout/AIutel.py +53 -800
  2. webscout/Bard.py +2 -22
  3. webscout/Provider/AISEARCH/__init__.py +11 -10
  4. webscout/Provider/AISEARCH/felo_search.py +7 -3
  5. webscout/Provider/AISEARCH/scira_search.py +26 -11
  6. webscout/Provider/AISEARCH/stellar_search.py +53 -8
  7. webscout/Provider/Deepinfra.py +81 -57
  8. webscout/Provider/ExaChat.py +9 -5
  9. webscout/Provider/Flowith.py +1 -1
  10. webscout/Provider/FreeGemini.py +2 -2
  11. webscout/Provider/Gemini.py +3 -10
  12. webscout/Provider/GeminiProxy.py +31 -5
  13. webscout/Provider/LambdaChat.py +39 -31
  14. webscout/Provider/Netwrck.py +5 -8
  15. webscout/Provider/OLLAMA.py +8 -9
  16. webscout/Provider/OPENAI/README.md +1 -1
  17. webscout/Provider/OPENAI/TogetherAI.py +57 -48
  18. webscout/Provider/OPENAI/TwoAI.py +94 -1
  19. webscout/Provider/OPENAI/__init__.py +1 -3
  20. webscout/Provider/OPENAI/autoproxy.py +1 -1
  21. webscout/Provider/OPENAI/copilot.py +73 -26
  22. webscout/Provider/OPENAI/deepinfra.py +60 -24
  23. webscout/Provider/OPENAI/exachat.py +9 -5
  24. webscout/Provider/OPENAI/monochat.py +3 -3
  25. webscout/Provider/OPENAI/netwrck.py +4 -7
  26. webscout/Provider/OPENAI/qodo.py +630 -0
  27. webscout/Provider/OPENAI/scirachat.py +86 -49
  28. webscout/Provider/OPENAI/textpollinations.py +19 -14
  29. webscout/Provider/OPENAI/venice.py +1 -0
  30. webscout/Provider/Perplexitylabs.py +163 -147
  31. webscout/Provider/Qodo.py +478 -0
  32. webscout/Provider/TTI/__init__.py +1 -0
  33. webscout/Provider/TTI/monochat.py +3 -3
  34. webscout/Provider/TTI/together.py +7 -6
  35. webscout/Provider/TTI/venice.py +368 -0
  36. webscout/Provider/TextPollinationsAI.py +19 -14
  37. webscout/Provider/TogetherAI.py +57 -44
  38. webscout/Provider/TwoAI.py +96 -2
  39. webscout/Provider/TypliAI.py +33 -27
  40. webscout/Provider/UNFINISHED/PERPLEXED_search.py +254 -0
  41. webscout/Provider/UNFINISHED/fetch_together_models.py +6 -11
  42. webscout/Provider/Venice.py +1 -0
  43. webscout/Provider/WiseCat.py +18 -20
  44. webscout/Provider/__init__.py +4 -10
  45. webscout/Provider/copilot.py +58 -61
  46. webscout/Provider/freeaichat.py +64 -55
  47. webscout/Provider/monochat.py +275 -0
  48. webscout/Provider/scira_chat.py +115 -21
  49. webscout/Provider/toolbaz.py +5 -10
  50. webscout/Provider/typefully.py +1 -11
  51. webscout/Provider/x0gpt.py +325 -315
  52. webscout/__init__.py +4 -11
  53. webscout/auth/__init__.py +19 -4
  54. webscout/auth/api_key_manager.py +189 -189
  55. webscout/auth/auth_system.py +25 -40
  56. webscout/auth/config.py +105 -6
  57. webscout/auth/database.py +377 -22
  58. webscout/auth/models.py +185 -130
  59. webscout/auth/request_processing.py +175 -11
  60. webscout/auth/routes.py +119 -5
  61. webscout/auth/server.py +9 -2
  62. webscout/auth/simple_logger.py +236 -0
  63. webscout/sanitize.py +1074 -0
  64. webscout/version.py +1 -1
  65. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/METADATA +9 -150
  66. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/RECORD +70 -72
  67. webscout/Provider/AI21.py +0 -177
  68. webscout/Provider/HuggingFaceChat.py +0 -469
  69. webscout/Provider/OPENAI/README_AUTOPROXY.md +0 -238
  70. webscout/Provider/OPENAI/freeaichat.py +0 -363
  71. webscout/Provider/OPENAI/typegpt.py +0 -368
  72. webscout/Provider/OPENAI/uncovrAI.py +0 -477
  73. webscout/Provider/WritingMate.py +0 -273
  74. webscout/Provider/typegpt.py +0 -284
  75. webscout/Provider/uncovr.py +0 -333
  76. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/WHEEL +0 -0
  77. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/entry_points.txt +0 -0
  78. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/licenses/LICENSE.md +0 -0
  79. {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/top_level.txt +0 -0
@@ -1,238 +0,0 @@
- # WebScout Auto-Proxy System
-
- The WebScout Auto-Proxy system provides automatic proxy injection for all OpenAI-compatible providers. This system fetches proxies from a remote source and automatically configures them for HTTP sessions.
-
- ## Features
-
- **Automatic Proxy Injection**: All OpenAI-compatible providers automatically get proxy support
- **Multiple HTTP Client Support**: Works with `requests`, `httpx`, and `curl_cffi`
- **Proxy Pool Management**: Automatically fetches and caches proxies from a remote source
- **Working Proxy Detection**: Tests proxies to find working ones
- **Easy Disable Option**: Can be disabled per provider instance or globally
-
- ## How It Works
-
- The system uses a metaclass (`ProxyAutoMeta`) that automatically:
-
- 1. Fetches proxies from `http://207.180.209.185:5000/ips.txt`
- 2. Caches proxies for 5 minutes to avoid excessive requests
- 3. Randomly selects a proxy for each provider instance
- 4. Patches existing HTTP session objects with proxy configuration
- 5. Provides helper methods for creating proxied sessions
-
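The deleted README never shows the metaclass itself, so here is a minimal sketch of what steps 1–4 above could look like; `PROXY_SOURCE_URL`, `_fetch_proxies`, and the cache fields are illustrative assumptions, not the actual `autoproxy` implementation:

```python
# Hypothetical sketch of a proxy-injecting metaclass (not webscout's actual code).
import random
import time
import requests

PROXY_SOURCE_URL = "http://207.180.209.185:5000/ips.txt"  # source named above
_CACHE = {"proxies": [], "fetched_at": 0.0}
_CACHE_TTL = 300  # five minutes, per the README

def _fetch_proxies() -> list:
    """Fetch the proxy list, reusing the cache for _CACHE_TTL seconds."""
    if time.time() - _CACHE["fetched_at"] > _CACHE_TTL:
        try:
            text = requests.get(PROXY_SOURCE_URL, timeout=10).text
            _CACHE["proxies"] = [line.strip() for line in text.splitlines() if line.strip()]
            _CACHE["fetched_at"] = time.time()
        except requests.RequestException:
            pass  # keep whatever is cached; providers fall back to direct connections
    return _CACHE["proxies"]

class ProxyAutoMeta(type):
    def __call__(cls, *args, **kwargs):
        disable = kwargs.pop("disable_auto_proxy", getattr(cls, "DISABLE_AUTO_PROXY", False))
        instance = super().__call__(*args, **kwargs)
        if not disable and not getattr(instance, "proxies", None):
            pool = _fetch_proxies()
            if pool:
                chosen = random.choice(pool)  # one random proxy per instance
                instance.proxies = {"http": chosen, "https": chosen}
                session = getattr(instance, "session", None)
                if session is not None and hasattr(session, "proxies"):
                    session.proxies.update(instance.proxies)  # patch the existing session
        return instance
```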
23
- ## Usage
24
-
25
- ### Automatic Usage (Default)
26
-
27
- All OpenAI-compatible providers automatically get proxy support:
28
-
29
- ```python
30
- from webscout.Provider.OPENAI.yep import YEPCHAT
31
-
32
- # Proxy is automatically configured
33
- client = YEPCHAT()
34
-
35
- # All requests will use the configured proxy
36
- response = client.chat.completions.create(
37
- model="DeepSeek-R1-Distill-Qwen-32B",
38
- messages=[{"role": "user", "content": "Hello!"}]
39
- )
40
- ```
41
-
42
- ### Disabling Auto-Proxy
43
-
44
- You can disable automatic proxy injection:
45
-
46
- ```python
47
- # Disable for a specific instance
48
- client = YEPCHAT(disable_auto_proxy=True)
49
-
50
- # Or set a class attribute to disable for all instances
51
- class MyProvider(OpenAICompatibleProvider):
52
- DISABLE_AUTO_PROXY = True
53
- ```
54
-
55
- ### Manual Proxy Configuration
56
-
57
- You can also provide your own proxies:
58
-
59
- ```python
60
- custom_proxies = {
61
- 'http': 'http://user:pass@proxy.example.com:8080',
62
- 'https': 'http://user:pass@proxy.example.com:8080'
63
- }
64
-
65
- client = YEPCHAT(proxies=custom_proxies)
66
- ```
67
-
68
- ### Using Helper Methods
69
-
70
- Each provider instance gets helper methods for creating proxied sessions:
71
-
72
- ```python
73
- client = YEPCHAT()
74
-
75
- # Get a requests.Session with proxies configured
76
- session = client.get_proxied_session()
77
-
78
- # Get a curl_cffi Session with proxies configured
79
- curl_session = client.get_proxied_curl_session(impersonate="chrome120")
80
-
81
- # Get an httpx.Client with proxies configured (if httpx is installed)
82
- httpx_client = client.get_proxied_httpx_client()
83
- ```
84
-
85
- ## Direct API Usage
86
-
87
- You can also use the proxy functions directly:
88
-
89
- ```python
90
- from webscout.Provider.OPENAI.autoproxy import (
91
- get_auto_proxy,
92
- get_proxy_dict,
93
- get_working_proxy,
94
- test_proxy,
95
- get_proxy_stats
96
- )
97
-
98
- # Get a random proxy
99
- proxy = get_auto_proxy()
100
-
101
- # Get proxy in dictionary format
102
- proxy_dict = get_proxy_dict()
103
-
104
- # Find a working proxy (tests multiple proxies)
105
- working_proxy = get_working_proxy(max_attempts=5)
106
-
107
- # Test if a proxy is working
108
- is_working = test_proxy(proxy)
109
-
110
- # Get proxy cache statistics
111
- stats = get_proxy_stats()
112
- ```
113
-
114
- ## Proxy Format
115
-
116
- The system expects proxies in the format:
117
- ```
118
- http://username:password@host:port
119
- ```
120
-
121
- Example:
122
- ```
123
- http://fnXlN8NP6StpxZkxmNLyOt2MaVLQunpGC7K96j7R0KbnE5sU_2RdYRxaoy7P2yfqrD7Y8UFexv8kpTyK0LwkDQ==:fnXlN8NP6StpxZkxmNLyOt2MaVLQunpGC7K96j7R0KbnE5sU_2RdYRxaoy7P2yfqrD7Y8UFexv8kpTyK0LwkDQ==@190.103.177.163:80
124
- ```
125
-
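A line in that format maps directly onto the proxy dict that `requests`-style clients expect; a small illustrative helper (`parse_proxy_line` is not part of webscout):

```python
def parse_proxy_line(line: str) -> dict:
    """Turn one line of ips.txt into a requests-style proxy mapping (illustrative)."""
    url = line.strip()
    if not url.startswith(("http://", "https://")):
        url = "http://" + url  # assume bare host:port lines are HTTP proxies
    return {"http": url, "https": url}

# parse_proxy_line("http://user:pass@190.103.177.163:80")
# -> {'http': 'http://user:pass@190.103.177.163:80',
#     'https': 'http://user:pass@190.103.177.163:80'}
```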
- ## Configuration
-
- ### Cache Duration
-
- You can adjust the proxy cache duration:
-
- ```python
- from webscout.Provider.OPENAI.autoproxy import set_proxy_cache_duration
-
- # Set cache to 10 minutes
- set_proxy_cache_duration(600)
- ```
-
- ### Force Refresh
-
- You can force-refresh the proxy cache:
-
- ```python
- from webscout.Provider.OPENAI.autoproxy import refresh_proxy_cache
-
- # Force refresh and get the number of proxies loaded
- count = refresh_proxy_cache()
- print(f"Loaded {count} proxies")
- ```
-
- ## Error Handling
-
- The system gracefully handles errors:
-
- If proxy fetching fails, providers work without proxies
- If a proxy test fails, the system tries other proxies
- If no working proxy is found, providers fall back to direct connections
-
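That fallback amounts to treating proxy selection as best-effort; a hedged sketch using the documented `get_working_proxy` helper (the string return shape and the surrounding logic are assumptions):

```python
import requests
from webscout.Provider.OPENAI.autoproxy import get_working_proxy

def make_session() -> requests.Session:
    """Best-effort proxied session: fall back to a direct connection on failure."""
    session = requests.Session()
    try:
        proxy = get_working_proxy(max_attempts=5)  # documented in "Direct API Usage"
        if proxy:  # assumed to be a proxy URL string
            session.proxies.update({"http": proxy, "https": proxy})
    except Exception:
        pass  # no working proxy found: use a direct connection
    return session
```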
- ## Logging
-
- The system uses Python's logging module. To see proxy-related logs:
-
- ```python
- import logging
- logging.basicConfig(level=logging.INFO)
-
- # Or specifically for the autoproxy module
- logger = logging.getLogger('webscout.Provider.OPENAI.autoproxy')
- logger.setLevel(logging.DEBUG)
- ```
-
- ## Testing
-
- Run the test suite to verify functionality:
-
- ```bash
- python webscout/Provider/OPENAI/test_autoproxy.py
- ```
-
- ## Implementation Details
-
- ### ProxyAutoMeta Metaclass
-
- The `ProxyAutoMeta` metaclass is applied to `OpenAICompatibleProvider` and:
-
- 1. Intercepts class instantiation
- 2. Checks for `disable_auto_proxy` parameter or class attribute
- 3. Fetches and configures proxies if not disabled
- 4. Patches existing session objects
- 5. Adds helper methods to the instance
-
- ### Session Patching
-
- The system automatically patches these session types:
- `requests.Session` - Updates the `proxies` attribute
- `httpx.Client` - Sets the `_proxies` attribute
- `curl_cffi.Session` - Updates the `proxies` attribute
- `curl_cffi.AsyncSession` - Updates the `proxies` attribute
-
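In effect, the patching dispatches on what the session object exposes; a rough illustration following the table above (`patch_session` is not the module's real function):

```python
def patch_session(session, proxies: dict) -> None:
    """Apply a proxy mapping to a session object, per the table above (illustrative)."""
    existing = getattr(session, "proxies", None)
    if isinstance(existing, dict):
        existing.update(proxies)  # requests.Session, curl_cffi Session/AsyncSession
    else:
        setattr(session, "_proxies", proxies)  # httpx.Client, as described above
```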
- ### Proxy Source
-
- Proxies are fetched from: `http://207.180.209.185:5000/ips.txt`
-
- The system expects one proxy per line in the format shown above.
-
- ## Troubleshooting
-
- ### No Proxies Available
-
- If you see "No proxies available" messages:
- 1. Check if the proxy source URL is accessible
- 2. Verify your internet connection
- 3. Check if the proxy format is correct
-
- ### Proxy Test Failures
-
- If proxy tests fail:
- 1. Some proxies may be temporarily unavailable (normal)
- 2. The test URL (`https://httpbin.org/ip`) may be blocked
- 3. There may be network connectivity issues
-
- ### Provider Not Getting Proxies
-
- If a provider doesn't get automatic proxies:
- 1. Ensure it inherits from `OpenAICompatibleProvider`
- 2. Check if `disable_auto_proxy` is set
- 3. Verify the metaclass is properly imported
-
- ## Contributing
-
- To add proxy support to a new provider:
-
- 1. Inherit from `OpenAICompatibleProvider`
- 2. Accept the `disable_auto_proxy` parameter in `__init__`
- 3. Use `self.proxies` for HTTP requests
- 4. Optionally use helper methods like `self.get_proxied_session()`
-
- The metaclass will handle the rest automatically!
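Put together, a new provider following those four steps might look like the sketch below; the class name, endpoint, and absolute import path are placeholders (the import mirrors the relative `.base` import used in `freeaichat.py` further down):

```python
import requests
from webscout.Provider.OPENAI.base import OpenAICompatibleProvider  # path assumed

class MyProvider(OpenAICompatibleProvider):
    def __init__(self, disable_auto_proxy: bool = False, timeout: int = 30):
        # Step 2: accept disable_auto_proxy; ProxyAutoMeta reads it at instantiation.
        self.timeout = timeout
        self.session = requests.Session()  # patched by the metaclass unless disabled

    def ping(self) -> int:
        # Step 3: route requests through self.proxies (injected by the metaclass).
        resp = self.session.get(
            "https://example.com/health",  # placeholder endpoint
            proxies=getattr(self, "proxies", None),
            timeout=self.timeout,
        )
        return resp.status_code
```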
@@ -1,363 +0,0 @@
- import time
- import uuid
- import requests
- import json
- from typing import List, Dict, Optional, Union, Generator, Any
-
- # Import base classes and utility structures
- from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
- from .utils import (
-     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-     ChatCompletionMessage, CompletionUsage, count_tokens
- )
-
- # Attempt to import LitAgent, fallback if not available
- try:
-     from webscout.litagent import LitAgent
- except ImportError:
-     pass
-
- # --- FreeAIChat Client ---
-
- class Completions(BaseCompletions):
-     def __init__(self, client: 'FreeAIChat'):
-         self._client = client
-
-     def create(
-         self,
-         *,
-         model: str,
-         messages: List[Dict[str, str]],
-         max_tokens: Optional[int] = 2049,
-         stream: bool = False,
-         temperature: Optional[float] = None,
-         top_p: Optional[float] = None,
-         timeout: Optional[int] = None,
-         proxies: Optional[Dict[str, str]] = None,
-         **kwargs: Any
-     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-         """
-         Creates a model response for the given chat conversation.
-         Mimics openai.chat.completions.create
-         """
-         payload = {
-             "model": model,
-             "messages": messages,
-             "max_tokens": max_tokens,
-             "stream": stream,
-         }
-         if temperature is not None:
-             payload["temperature"] = temperature
-         if top_p is not None:
-             payload["top_p"] = top_p
-
-         payload.update(kwargs)
-
-         request_id = f"chatcmpl-{uuid.uuid4()}"
-         created_time = int(time.time())
-
-         if stream:
-             return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
-         else:
-             return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)
-
-     def _create_stream(
-         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
-     ) -> Generator[ChatCompletionChunk, None, None]:
-         try:
-             response = self._client.session.post(
-                 self._client.api_endpoint,
-                 headers=self._client.headers,
-                 json=payload,
-                 stream=True,
-                 timeout=timeout or self._client.timeout,
-                 proxies=proxies or getattr(self._client, "proxies", None)
-             )
-
-             # Handle non-200 responses
-             if not response.ok:
-                 raise IOError(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
-
-             # Track token usage across chunks
-             prompt_tokens = 0
-             completion_tokens = 0
-             total_tokens = 0
-
-             # Estimate prompt tokens based on message length
-             for msg in payload.get("messages", []):
-                 prompt_tokens += count_tokens(msg.get("content", ""))
-
-             for line in response.iter_lines():
-                 if not line:
-                     continue
-
-                 line_str = line.decode('utf-8').strip()
-
-                 if line_str.startswith("data: "):
-                     json_str = line_str[6:]  # Remove "data: " prefix
-                     if json_str == "[DONE]":
-                         break
-
-                     try:
-                         data = json.loads(json_str)
-                         choice_data = data.get('choices', [{}])[0]
-                         delta_data = choice_data.get('delta', {})
-                         finish_reason = choice_data.get('finish_reason')
-
-                         # Update token counts if available
-                         usage_data = data.get('usage', {})
-                         if usage_data:
-                             prompt_tokens = usage_data.get('prompt_tokens', prompt_tokens)
-                             completion_tokens = usage_data.get('completion_tokens', completion_tokens)
-                             total_tokens = usage_data.get('total_tokens', total_tokens)
-
-                         # Create the delta object
-                         delta = ChoiceDelta(
-                             content=delta_data.get('content'),
-                             role=delta_data.get('role'),
-                             tool_calls=delta_data.get('tool_calls')
-                         )
-
-                         # Create the choice object
-                         choice = Choice(
-                             index=choice_data.get('index', 0),
-                             delta=delta,
-                             finish_reason=finish_reason,
-                             logprobs=choice_data.get('logprobs')
-                         )
-
-                         # Create the chunk object
-                         chunk = ChatCompletionChunk(
-                             id=request_id,
-                             choices=[choice],
-                             created=created_time,
-                             model=model,
-                             system_fingerprint=data.get('system_fingerprint')
-                         )
-
-                         # Return the chunk object
-                         yield chunk
-                     except json.JSONDecodeError:
-                         print(f"Warning: Could not decode JSON line: {json_str}")
-                         continue
-
-             # Final chunk with finish_reason="stop"
-             delta = ChoiceDelta(
-                 content=None,
-                 role=None,
-                 tool_calls=None
-             )
-
-             choice = Choice(
-                 index=0,
-                 delta=delta,
-                 finish_reason="stop",
-                 logprobs=None
-             )
-
-             chunk = ChatCompletionChunk(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model,
-                 system_fingerprint=None
-             )
-
-             yield chunk
-
-         except Exception as e:
-             print(f"Error during FreeAIChat stream request: {e}")
-             raise IOError(f"FreeAIChat request failed: {e}") from e
-
-     def _create_non_stream(
-         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
-     ) -> ChatCompletion:
-         try:
-             response = self._client.session.post(
-                 self._client.api_endpoint,
-                 headers=self._client.headers,
-                 json=payload,
-                 timeout=timeout or self._client.timeout,
-                 proxies=proxies or getattr(self._client, "proxies", None)
-             )
-
-             # Handle non-200 responses
-             if not response.ok:
-                 raise IOError(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
-
-             # Parse the response
-             data = response.json()
-
-             choices_data = data.get('choices', [])
-             usage_data = data.get('usage', {})
-
-             choices = []
-             for choice_d in choices_data:
-                 message_d = choice_d.get('message', {})
-                 message = ChatCompletionMessage(
-                     role=message_d.get('role', 'assistant'),
-                     content=message_d.get('content', '')
-                 )
-                 choice = Choice(
-                     index=choice_d.get('index', 0),
-                     message=message,
-                     finish_reason=choice_d.get('finish_reason', 'stop')
-                 )
-                 choices.append(choice)
-
-             usage = CompletionUsage(
-                 prompt_tokens=usage_data.get('prompt_tokens', 0),
-                 completion_tokens=usage_data.get('completion_tokens', 0),
-                 total_tokens=usage_data.get('total_tokens', 0)
-             )
-
-             completion = ChatCompletion(
-                 id=request_id,
-                 choices=choices,
-                 created=created_time,
-                 model=data.get('model', model),
-                 usage=usage,
-             )
-             return completion
-
-         except Exception as e:
-             print(f"Error during FreeAIChat non-stream request: {e}")
-             raise IOError(f"FreeAIChat request failed: {e}") from e
-
- class Chat(BaseChat):
-     def __init__(self, client: 'FreeAIChat'):
-         self.completions = Completions(client)
-
- class FreeAIChat(OpenAICompatibleProvider):
-     """
-     OpenAI-compatible client for FreeAIChat API.
-
-     Usage:
-         client = FreeAIChat()
-         response = client.chat.completions.create(
-             model="GPT 4o",
-             messages=[{"role": "user", "content": "Hello!"}]
-         )
-     """
-
-     AVAILABLE_MODELS = [
-         # OpenAI Models
-         "GPT 4o",
-         "GPT 4.5 Preview",
-         "GPT 4o Latest",
-         "GPT 4o mini",
-         "GPT 4o Search Preview",
-         "O1",
-         "O1 Mini",
-         "O3 Mini",
-         "O3 Mini High",
-         "O3 Mini Low",
-
-         # Anthropic Models
-         "Claude 3.5 haiku",
-         "claude 3.5 sonnet",
-         "Claude 3.7 Sonnet",
-         "Claude 3.7 Sonnet (Thinking)",
-
-         # Deepseek Models
-         "Deepseek R1",
-         "Deepseek R1 Fast",
-         "Deepseek V3",
-         "Deepseek v3 0324",
-
-         # Google Models
-         "Gemini 1.5 Flash",
-         "Gemini 1.5 Pro",
-         "Gemini 2.0 Flash",
-         "Gemini 2.0 Pro",
-         "Gemini 2.5 Pro",
-
-         # Llama Models
-         "Llama 3.1 405B",
-         "Llama 3.1 70B Fast",
-         "Llama 3.3 70B",
-         "Llama 3.2 90B Vision",
-         "Llama 4 Scout",
-         "Llama 4 Maverick",
-
-         # Mistral Models
-         "Mistral Large",
-         "Mistral Nemo",
-         "Mixtral 8x22B",
-
-         # Qwen Models
-         "Qwen Max",
-         "Qwen Plus",
-         "Qwen Turbo",
-         "QwQ 32B",
-         "QwQ Plus",
-
-         # XAI Models
-         "Grok 2",
-         "Grok 3",
-     ]
-
-     def __init__(
-         self,
-         timeout: Optional[int] = None,
-         browser: str = "chrome"
-     ):
-         """
-         Initialize the FreeAIChat client.
-
-         Args:
-             timeout: Request timeout in seconds (None for no timeout)
-             browser: Browser to emulate in user agent
-         """
-         self.timeout = timeout
-         self.api_endpoint = "https://freeaichatplayground.com/api/v1/chat/completions"
-         self.session = requests.Session()
-
-         # Initialize LitAgent for user agent generation
-         agent = LitAgent()
-         self.fingerprint = agent.generate_fingerprint(browser)
-
-         # Initialize headers
-         self.headers = {
-             'User-Agent': self.fingerprint["user_agent"],
-             'Accept': '*/*',
-             'Content-Type': 'application/json',
-             'Origin': 'https://freeaichatplayground.com',
-             'Referer': 'https://freeaichatplayground.com/',
-             'Sec-Fetch-Mode': 'cors',
-             'Sec-Fetch-Site': 'same-origin'
-         }
-
-         self.session.headers.update(self.headers)
-
-         # Initialize the chat interface
-         self.chat = Chat(self)
-
-     def convert_model_name(self, model: str) -> str:
-         """
-         Convert model names to ones supported by FreeAIChat.
-
-         Args:
-             model: Model name to convert
-
-         Returns:
-             FreeAIChat model name
-         """
-         # If the model is already a valid FreeAIChat model, return it
-         if model in self.AVAILABLE_MODELS:
-             return model
-
-         # Default to GPT 4o if model not found
-         print(f"Warning: Unknown model '{model}'. Using 'GPT 4o' instead.")
-         return "GPT 4o"
-
-     @property
-     def models(self):
-         class _ModelList:
-             def list(inner_self):
-                 return type(self).AVAILABLE_MODELS
-         return _ModelList()
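For reference, based on the code above, a streaming call to this (now removed) provider would have looked roughly like the sketch below; the attribute access on the chunk assumes the `ChatCompletionChunk`/`Choice`/`ChoiceDelta` objects from `.utils` expose their constructor fields:

```python
from webscout.Provider.OPENAI.freeaichat import FreeAIChat  # module deleted in 8.3.5

client = FreeAIChat()
for chunk in client.chat.completions.create(
    model="GPT 4o",
    messages=[{"role": "user", "content": "Hello!"}],
    stream=True,  # routes through _create_stream above
):
    delta = chunk.choices[0].delta
    if delta.content:
        print(delta.content, end="", flush=True)
```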