webscout-8.1-py3-none-any.whl → webscout-8.2.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (60)
  1. inferno/__init__.py +6 -0
  2. inferno/__main__.py +9 -0
  3. inferno/cli.py +6 -0
  4. webscout/Local/__init__.py +6 -0
  5. webscout/Local/__main__.py +9 -0
  6. webscout/Local/api.py +576 -0
  7. webscout/Local/cli.py +338 -0
  8. webscout/Local/config.py +75 -0
  9. webscout/Local/llm.py +188 -0
  10. webscout/Local/model_manager.py +205 -0
  11. webscout/Local/server.py +187 -0
  12. webscout/Local/utils.py +93 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +359 -0
  14. webscout/Provider/AISEARCH/__init__.py +2 -1
  15. webscout/Provider/AISEARCH/scira_search.py +8 -4
  16. webscout/Provider/ExaChat.py +18 -8
  17. webscout/Provider/GithubChat.py +5 -1
  18. webscout/Provider/Glider.py +4 -2
  19. webscout/Provider/OPENAI/__init__.py +9 -1
  20. webscout/Provider/OPENAI/c4ai.py +22 -2
  21. webscout/Provider/OPENAI/chatgpt.py +549 -0
  22. webscout/Provider/OPENAI/deepinfra.py +1 -13
  23. webscout/Provider/OPENAI/e2b.py +1192 -0
  24. webscout/Provider/OPENAI/exaai.py +1 -16
  25. webscout/Provider/OPENAI/exachat.py +20 -8
  26. webscout/Provider/OPENAI/freeaichat.py +1 -4
  27. webscout/Provider/OPENAI/glider.py +3 -1
  28. webscout/Provider/OPENAI/llmchatco.py +3 -1
  29. webscout/Provider/OPENAI/opkfc.py +488 -0
  30. webscout/Provider/OPENAI/scirachat.py +11 -7
  31. webscout/Provider/OPENAI/standardinput.py +425 -0
  32. webscout/Provider/OPENAI/textpollinations.py +285 -0
  33. webscout/Provider/OPENAI/toolbaz.py +405 -0
  34. webscout/Provider/OPENAI/typegpt.py +1 -16
  35. webscout/Provider/OPENAI/uncovrAI.py +455 -0
  36. webscout/Provider/OPENAI/venice.py +1 -16
  37. webscout/Provider/OPENAI/writecream.py +156 -0
  38. webscout/Provider/OPENAI/x0gpt.py +2 -20
  39. webscout/Provider/OPENAI/yep.py +2 -4
  40. webscout/Provider/StandardInput.py +278 -0
  41. webscout/Provider/TextPollinationsAI.py +27 -28
  42. webscout/Provider/Writecream.py +211 -0
  43. webscout/Provider/WritingMate.py +197 -0
  44. webscout/Provider/Youchat.py +30 -26
  45. webscout/Provider/__init__.py +10 -2
  46. webscout/Provider/koala.py +2 -2
  47. webscout/Provider/llmchatco.py +5 -0
  48. webscout/Provider/scira_chat.py +5 -2
  49. webscout/Provider/scnet.py +187 -0
  50. webscout/Provider/toolbaz.py +320 -0
  51. webscout/Provider/uncovr.py +3 -3
  52. webscout/conversation.py +32 -32
  53. webscout/version.py +1 -1
  54. {webscout-8.1.dist-info → webscout-8.2.1.dist-info}/METADATA +54 -3
  55. {webscout-8.1.dist-info → webscout-8.2.1.dist-info}/RECORD +59 -33
  56. webscout-8.2.1.dist-info/entry_points.txt +5 -0
  57. {webscout-8.1.dist-info → webscout-8.2.1.dist-info}/top_level.txt +1 -0
  58. webscout-8.1.dist-info/entry_points.txt +0 -3
  59. {webscout-8.1.dist-info → webscout-8.2.1.dist-info}/LICENSE.md +0 -0
  60. {webscout-8.1.dist-info → webscout-8.2.1.dist-info}/WHEEL +0 -0
webscout/Provider/AISEARCH/Perplexity.py
@@ -0,0 +1,359 @@
+ import json
+ import random
+ from uuid import uuid4
+ from typing import Dict, Optional, Generator, Union, Any
+ from curl_cffi import requests
+
+ from webscout.AIbase import AISearch
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+
+
+ class Response:
+     """A wrapper class for Perplexity API responses.
+
+     This class automatically converts response objects to their text representation
+     when printed or converted to string.
+
+     Attributes:
+         text (str): The text content of the response
+
+     Example:
+         >>> response = Response("Hello, world!")
+         >>> print(response)
+         Hello, world!
+         >>> str(response)
+         'Hello, world!'
+     """
+     def __init__(self, text: str):
+         self.text = text
+
+     def __str__(self):
+         return self.text
+
+     def __repr__(self):
+         return self.text
+
+
+ class Perplexity(AISearch):
+     """A class to interact with the Perplexity AI search API.
+
+     Perplexity provides a powerful search interface that returns AI-generated responses
+     based on web content. It supports both streaming and non-streaming responses,
+     multiple search modes, and model selection.
+
+     Basic Usage:
+         >>> from webscout import Perplexity
+         >>> ai = Perplexity()
+         >>> # Non-streaming example
+         >>> response = ai.search("What is Python?")
+         >>> print(response)
+         Python is a high-level programming language...
+
+         >>> # Streaming example
+         >>> for chunk in ai.search("Tell me about AI", stream=True):
+         ...     print(chunk, end="", flush=True)
+         Artificial Intelligence is...
+
+         >>> # Pro search with specific model (requires authentication via cookies)
+         >>> cookies = {"perplexity-user": "your_cookie_value"}
+         >>> ai_pro = Perplexity(cookies=cookies)
+         >>> response = ai_pro.search("Latest AI research", mode="pro", model="gpt-4o")
+         >>> print(response)
+
+         >>> # Raw response format
+         >>> for chunk in ai.search("Hello", stream=True, raw=True):
+         ...     print(chunk)
+         {'text': 'Hello'}
+         {'text': ' there!'}
+
+     Args:
+         cookies (dict, optional): Cookies to use for authentication. Defaults to None.
+         timeout (int, optional): Request timeout in seconds. Defaults to 60.
+         proxies (dict, optional): Proxy configuration for requests. Defaults to None.
+     """
+
+     def __init__(
+         self,
+         cookies: Optional[Dict[str, str]] = None,
+         timeout: int = 60,
+         proxies: Optional[Dict[str, str]] = None
+     ):
+         """
+         Initialize the Perplexity client.
+
+         Args:
+             cookies (dict, optional): Cookies to use for authentication. Defaults to None.
+             timeout (int, optional): Request timeout in seconds. Defaults to 60.
+             proxies (dict, optional): Proxy configuration for requests. Defaults to None.
+         """
+         self.timeout = timeout
+         self.agent = LitAgent()
+         self.session = requests.Session(headers={
+             'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
+             'accept-language': 'en-US,en;q=0.9',
+             'cache-control': 'max-age=0',
+             'dnt': '1',
+             'priority': 'u=0, i',
+             'sec-ch-ua': '"Not;A=Brand";v="24", "Chromium";v="128"',
+             'sec-ch-ua-arch': '"x86"',
+             'sec-ch-ua-bitness': '"64"',
+             'sec-ch-ua-full-version': '"128.0.6613.120"',
+             'sec-ch-ua-full-version-list': '"Not;A=Brand";v="24.0.0.0", "Chromium";v="128.0.6613.120"',
+             'sec-ch-ua-mobile': '?0',
+             'sec-ch-ua-model': '""',
+             'sec-ch-ua-platform': '"Windows"',
+             'sec-ch-ua-platform-version': '"19.0.0"',
+             'sec-fetch-dest': 'document',
+             'sec-fetch-mode': 'navigate',
+             'sec-fetch-site': 'same-origin',
+             'sec-fetch-user': '?1',
+             'upgrade-insecure-requests': '1',
+             'user-agent': self.agent.random(),
+         }, cookies=cookies or {}, impersonate='chrome')
+
+         # Apply proxies if provided
+         if proxies:
+             self.session.proxies.update(proxies)
+
+         # Initialize session with socket.io
+         self.timestamp = format(random.getrandbits(32), '08x')
+
+         # Get socket.io session ID
+         response = self.session.get(f'https://www.perplexity.ai/socket.io/?EIO=4&transport=polling&t={self.timestamp}')
+         self.sid = json.loads(response.text[1:])['sid']
+
+         # Initialize socket.io connection
+         assert (self.session.post(
+             f'https://www.perplexity.ai/socket.io/?EIO=4&transport=polling&t={self.timestamp}&sid={self.sid}',
+             data='40{"jwt":"anonymous-ask-user"}'
+         )).text == 'OK'
+
+         # Get session info
+         self.session.get('https://www.perplexity.ai/api/auth/session')
+
+         # Set default values
+         self.copilot = 0 if not cookies else float('inf')
+         self.file_upload = 0 if not cookies else float('inf')
+
+     def _extract_answer(self, response):
+         """
+         Extract the answer from the response.
+
+         Args:
+             response (dict): The response from Perplexity AI.
+
+         Returns:
+             str: The extracted answer text.
+         """
+         if not response:
+             return ""
+
+         # Find the FINAL step in the text array
+         final_step = None
+         if 'text' in response and isinstance(response['text'], list):
+             for step in response['text']:
+                 if step.get('step_type') == 'FINAL' and 'content' in step and 'answer' in step['content']:
+                     final_step = step
+                     break
+
+         if not final_step:
+             return ""
+
+         try:
+             # Parse the answer JSON string
+             answer_json = json.loads(final_step['content']['answer'])
+             return answer_json.get('answer', '')
+         except (json.JSONDecodeError, KeyError):
+             return ""
+
+     def search(
+         self,
+         prompt: str,
+         mode: str = 'auto',
+         model: Optional[str] = None,
+         sources: Optional[list] = None,
+         stream: bool = False,
+         raw: bool = False,
+         language: str = 'en-US',
+         follow_up: Optional[Dict[str, Any]] = None,
+         incognito: bool = False
+     ) -> Union[Response, Generator[Union[Dict[str, str], Response], None, None]]:
+         """Search using the Perplexity API and get AI-generated responses.
+
+         This method sends a search query to Perplexity and returns the AI-generated response.
+         It supports both streaming and non-streaming modes, as well as raw response format.
+
+         Args:
+             prompt (str): The search query or prompt to send to the API.
+             mode (str, optional): Search mode. Options: 'auto', 'pro', 'reasoning', 'deep research'.
+                 Defaults to 'auto'.
+             model (str, optional): Model to use. Available models depend on the mode. Defaults to None.
+             sources (list, optional): Sources to use. Options: 'web', 'scholar', 'social'.
+                 Defaults to ['web'].
+             stream (bool, optional): If True, yields response chunks as they arrive.
+                 If False, returns complete response. Defaults to False.
+             raw (bool, optional): If True, returns raw response dictionaries.
+                 If False, returns Response objects that convert to text automatically.
+                 Defaults to False.
+             language (str, optional): Language to use. Defaults to 'en-US'.
+             follow_up (dict, optional): Follow-up information. Defaults to None.
+             incognito (bool, optional): Whether to use incognito mode. Defaults to False.
+
+         Returns:
+             If stream=True: Generator yielding response chunks as they arrive
+             If stream=False: Complete response
+
+         Raises:
+             ValueError: If invalid mode or model is provided
+             exceptions.APIConnectionError: If connection to API fails
+             exceptions.FailedToGenerateResponseError: If response generation fails
+         """
+         if sources is None:
+             sources = ['web']
+
+         # Validate inputs
+         if mode not in ['auto', 'pro', 'reasoning', 'deep research']:
+             raise ValueError('Search modes -> ["auto", "pro", "reasoning", "deep research"]')
+
+         if not all([source in ('web', 'scholar', 'social') for source in sources]):
+             raise ValueError('Sources -> ["web", "scholar", "social"]')
+
+         # Check if model is valid for the selected mode
+         valid_models = {
+             'auto': [None],
+             'pro': [None, 'sonar', 'gpt-4.5', 'gpt-4o', 'claude 3.7 sonnet', 'gemini 2.0 flash', 'grok-2'],
+             'reasoning': [None, 'r1', 'o3-mini', 'claude 3.7 sonnet'],
+             'deep research': [None]
+         }
+
+         if mode in valid_models and model not in valid_models[mode] and model is not None:
+             raise ValueError(f'Invalid model for {mode} mode. Valid models: {valid_models[mode]}')
+
+         # Prepare request data
+         json_data = {
+             'query_str': prompt,
+             'params': {
+                 'attachments': follow_up['attachments'] if follow_up else [],
+                 'frontend_context_uuid': str(uuid4()),
+                 'frontend_uuid': str(uuid4()),
+                 'is_incognito': incognito,
+                 'language': language,
+                 'last_backend_uuid': follow_up['backend_uuid'] if follow_up else None,
+                 'mode': 'concise' if mode == 'auto' else 'copilot',
+                 'model_preference': {
+                     'auto': {
+                         None: 'turbo'
+                     },
+                     'pro': {
+                         None: 'pplx_pro',
+                         'sonar': 'experimental',
+                         'gpt-4.5': 'gpt45',
+                         'gpt-4o': 'gpt4o',
+                         'claude 3.7 sonnet': 'claude2',
+                         'gemini 2.0 flash': 'gemini2flash',
+                         'grok-2': 'grok'
+                     },
+                     'reasoning': {
+                         None: 'pplx_reasoning',
+                         'r1': 'r1',
+                         'o3-mini': 'o3mini',
+                         'claude 3.7 sonnet': 'claude37sonnetthinking'
+                     },
+                     'deep research': {
+                         None: 'pplx_alpha'
+                     }
+                 }[mode][model],
+                 'source': 'default',
+                 'sources': sources,
+                 'version': '2.18'
+             }
+         }
+
+         try:
+             # Make the request
+             resp = self.session.post(
+                 'https://www.perplexity.ai/rest/sse/perplexity_ask',
+                 json=json_data,
+                 stream=True,
+                 timeout=self.timeout
+             )
+
+             if resp.status_code != 200:
+                 raise exceptions.APIConnectionError(f"API returned status code {resp.status_code}")
+
+             # Define streaming response handler
+             def stream_response():
+                 for chunk in resp.iter_lines(delimiter=b'\r\n\r\n'):
+                     content = chunk.decode('utf-8')
+                     if content.startswith('event: message\r\n'):
+                         content_json = json.loads(content[len('event: message\r\ndata: '):])
+                         if 'text' in content_json:
+                             try:
+                                 # If text is a string, try to parse it as JSON
+                                 if isinstance(content_json['text'], str):
+                                     content_json['text'] = json.loads(content_json['text'])
+                             except json.JSONDecodeError:
+                                 pass
+
+                             if raw:
+                                 yield content_json
+                             else:
+                                 # For non-raw responses, extract text from each chunk
+                                 if 'text' in content_json and isinstance(content_json['text'], list):
+                                     for step in content_json['text']:
+                                         if step.get('type') == 'answer' and 'value' in step:
+                                             yield Response(step['value'])
+                                         elif step.get('type') == 'thinking' and 'value' in step:
+                                             yield Response(step['value'])
+                     elif content.startswith('event: end_of_stream\r\n'):
+                         return
+
+             # Handle streaming or non-streaming response
+             if stream:
+                 return stream_response()
+             else:
+                 chunks = []
+                 final_response = None
+
+                 for chunk in resp.iter_lines(delimiter=b'\r\n\r\n'):
+                     content = chunk.decode('utf-8')
+                     if content.startswith('event: message\r\n'):
+                         content_json = json.loads(content[len('event: message\r\ndata: '):])
+                         if 'text' in content_json:
+                             try:
+                                 # If text is a string, try to parse it as JSON
+                                 if isinstance(content_json['text'], str):
+                                     content_json['text'] = json.loads(content_json['text'])
+                             except json.JSONDecodeError:
+                                 pass
+                             chunks.append(content_json)
+                             final_response = content_json
+                     elif content.startswith('event: end_of_stream\r\n'):
+                         # Process the final response to extract the answer
+                         if final_response:
+                             answer_text = self._extract_answer(final_response)
+                             return Response(answer_text) if not raw else final_response
+                         elif chunks:
+                             answer_text = self._extract_answer(chunks[-1])
+                             return Response(answer_text) if not raw else chunks[-1]
+                         else:
+                             return Response("") if not raw else {}
+
+                 # If we get here, something went wrong
+                 raise exceptions.FailedToGenerateResponseError("Failed to get complete response")
+
+         except requests.RequestsError as e:
+             raise exceptions.APIConnectionError(f"Connection error: {str(e)}")
+         except json.JSONDecodeError:
+             raise exceptions.FailedToGenerateResponseError("Failed to parse response JSON")
+         except Exception as e:
+             raise exceptions.FailedToGenerateResponseError(f"Error: {str(e)}")
+
+
+ if __name__ == "__main__":
+     # Simple test
+     ai = Perplexity()
+     response = ai.search("What is Python?")
+     print(response)
+
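For orientation, here is a minimal usage sketch distilled from the docstring above. It assumes the top-level `from webscout import Perplexity` re-export shown in that docstring, plus live network access to perplexity.ai:

    from webscout import Perplexity

    ai = Perplexity(timeout=60)

    # Non-streaming: returns a Response object that prints as plain text
    print(ai.search("What is Python?"))

    # Streaming: yields Response chunks as they arrive
    for chunk in ai.search("Tell me about AI", stream=True):
        print(chunk, end="", flush=True)

Anonymous sessions get the 'auto' mode only; per the constructor, passing authenticated cookies unlocks the pro/reasoning quotas.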
webscout/Provider/AISEARCH/__init__.py
@@ -6,4 +6,5 @@ from .monica_search import *
  from .webpilotai_search import *
  from .hika_search import *
  from .scira_search import *
- from .iask_search import *
+ from .iask_search import *
+ from .Perplexity import *
webscout/Provider/AISEARCH/scira_search.py
@@ -70,10 +70,14 @@ class Scira(AISearch):

      AVAILABLE_MODELS = {
          "scira-default": "Grok3",
-         "scira-grok-3-mini": "Grok3-mini", # thinking model
-         "scira-vision": "Grok2-Vision", # vision model
-         "scira-claude": "Sonnet-3.7",
-         "scira-optimus": "optimus",
+         "scira-grok-3-mini": "Grok3-mini", # thinking model
+         "scira-vision" : "Grok2-Vision", # vision model
+         "scira-4.1-mini": "GPT4.1-mini",
+         "scira-qwq": "QWQ-32B",
+         "scira-o4-mini": "o4-mini",
+         "scira-google": "gemini 2.5 flash"
+
+
      }

      def __init__(
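This hunk swaps scira-claude and scira-optimus for four new aliases. A sketch of selecting one of them; the constructor keyword and the `search` entry point are not visible here, so both are hedged assumptions modeled on the other AISEARCH providers such as Perplexity above:

    from webscout.Provider.AISEARCH.scira_search import Scira

    ai = Scira(model="scira-o4-mini")  # assumed keyword; maps to "o4-mini" per AVAILABLE_MODELS
    print(ai.search("What is Python?"))  # assumed AISearch-style entry point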
webscout/Provider/ExaChat.py
@@ -11,11 +11,11 @@ from webscout.litagent import LitAgent
  # Model configurations
  MODEL_CONFIGS = {
      "exaanswer": {
-         "endpoint": "https://exa-chat.vercel.app/api/exaanswer",
+         "endpoint": "https://ayle.chat/api/exaanswer",
          "models": ["exaanswer"],
      },
      "gemini": {
-         "endpoint": "https://exa-chat.vercel.app/api/gemini",
+         "endpoint": "https://ayle.chat/api/gemini",
          "models": [
              "gemini-2.0-flash",
              "gemini-2.0-flash-exp-image-generation",
@@ -26,7 +26,7 @@ MODEL_CONFIGS = {
          ],
      },
      "openrouter": {
-         "endpoint": "https://exa-chat.vercel.app/api/openrouter",
+         "endpoint": "https://ayle.chat/api/openrouter",
          "models": [
              "mistralai/mistral-small-3.1-24b-instruct:free",
              "deepseek/deepseek-r1:free",
@@ -36,7 +36,7 @@ MODEL_CONFIGS = {
          ],
      },
      "groq": {
-         "endpoint": "https://exa-chat.vercel.app/api/groq",
+         "endpoint": "https://ayle.chat/api/groq",
          "models": [
              "deepseek-r1-distill-llama-70b",
              "deepseek-r1-distill-qwen-32b",
@@ -56,12 +56,18 @@ MODEL_CONFIGS = {
          ],
      },
      "cerebras": {
-         "endpoint": "https://exa-chat.vercel.app/api/cerebras",
+         "endpoint": "https://ayle.chat/api/cerebras",
          "models": [
              "llama3.1-8b",
              "llama-3.3-70b"
          ],
      },
+     "xai": {
+         "endpoint": "https://ayle.chat/api/xai",
+         "models": [
+             "grok-3-mini-beta"
+         ],
+     },
  }

  class ExaChat(Provider):
@@ -71,6 +77,9 @@ class ExaChat(Provider):
      AVAILABLE_MODELS = [
          # ExaAnswer Models
          "exaanswer",
+
+         # XAI Models
+         "grok-3-mini-beta",

          # Gemini Models
          "gemini-2.0-flash",
@@ -106,7 +115,8 @@ class ExaChat(Provider):

          # Cerebras Models
          "llama3.1-8b",
-         "llama-3.3-70b"
+         "llama-3.3-70b",
+
      ]

      def __init__(
@@ -150,8 +160,8 @@ class ExaChat(Provider):
              "accept": "*/*",
              "accept-language": "en-US,en;q=0.9",
              "content-type": "application/json",
-             "origin": "https://exa-chat.vercel.app",
-             "referer": "https://exa-chat.vercel.app/",
+             "origin": "https://ayle.chat/",
+             "referer": "https://ayle.chat/",
              "user-agent": self.agent.random(),
          }

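Taken together, these hunks move every ExaChat route from exa-chat.vercel.app to ayle.chat and add an xai route exposing grok-3-mini-beta. A sketch of selecting the new model; the constructor keyword and the `chat` entry point are not shown in this diff, so both are hedged assumptions based on webscout's usual Provider interface:

    from webscout.Provider import ExaChat  # path per the files-changed list

    # assumed keyword; routes to https://ayle.chat/api/xai via MODEL_CONFIGS
    ai = ExaChat(model="grok-3-mini-beta")
    print(ai.chat("Hello"))  # assumed Provider-style entry point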
webscout/Provider/GithubChat.py
@@ -25,7 +25,11 @@ class GithubChat(Provider):
          "claude-3.7-sonnet",
          "claude-3.7-sonnet-thought",
          "gemini-2.0-flash-001",
-         "gemini-2.5-pro"
+         "gemini-2.5-pro",
+         "gpt-4.1",
+         "o4-mini"
+
+

      ]

webscout/Provider/Glider.py
@@ -13,9 +13,11 @@ class GliderAI(Provider):
      """

      AVAILABLE_MODELS = [
-         "chat-llama-3-1-70b",
          "chat-llama-3-1-8b",
          "chat-llama-3-2-3b",
+         "chat-deepseek-r1-qwen-32b",
+         "chat-qwen-2-5-7b",
+         "chat-qwen-qwq-32b",
          "deepseek-ai/DeepSeek-R1",
      ]

@@ -30,7 +32,7 @@ class GliderAI(Provider):
          proxies: dict = {},
          history_offset: int = 10250,
          act: Optional[str] = None,
-         model: str = "chat-llama-3-1-70b",
+         model: str = "chat-llama-3-1-8b",
          system_prompt: str = "You are a helpful AI assistant."
      ):
          """Initializes the GliderAI API client."""
webscout/Provider/OPENAI/__init__.py
@@ -14,4 +14,12 @@ from .yep import * # Add YEPCHAT
  from .heckai import *
  from .sonus import *
  from .exachat import *
- from .netwrck import *
+ from .netwrck import *
+ from .standardinput import *
+ from .writecream import *
+ from .toolbaz import *
+ from .uncovrAI import *
+ from .opkfc import *
+ from .chatgpt import *
+ from .textpollinations import *
+ from .e2b import *
webscout/Provider/OPENAI/c4ai.py
@@ -13,8 +13,28 @@ from .utils import (
      get_system_prompt, get_last_user_message, format_prompt # Import format_prompt
  )

- # Import LitAgent for browser fingerprinting
- from webscout.litagent import LitAgent
+ # Attempt to import LitAgent, fallback if not available
+ try:
+     from webscout.litagent import LitAgent
+ except ImportError:
+     # Define a dummy LitAgent if webscout is not installed or accessible
+     class LitAgent:
+         def generate_fingerprint(self, browser: str = "chrome") -> Dict[str, Any]:
+             # Return minimal default headers if LitAgent is unavailable
+             print("Warning: LitAgent not found. Using default minimal headers.")
+             return {
+                 "accept": "*/*",
+                 "accept_language": "en-US,en;q=0.9",
+                 "platform": "Windows",
+                 "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
+                 "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
+                 "browser_type": browser,
+             }
+         def random(self) -> str:
+             # Return a default user agent if LitAgent is unavailable
+             print("Warning: LitAgent not found. Using default user agent.")
+             return "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
+

  class Completions(BaseCompletions):
      def __init__(self, client: 'C4AI'):
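One caveat on this fallback: the stub annotates generate_fingerprint as Dict[str, Any], names this hunk does not import, so it relies on typing imports already present elsewhere in c4ai.py. A self-contained sketch of the same guarded-import pattern:

    from typing import Any, Dict

    try:
        from webscout.litagent import LitAgent  # real fingerprint generator, when available
    except ImportError:
        class LitAgent:  # stand-in with the same two-method surface
            def generate_fingerprint(self, browser: str = "chrome") -> Dict[str, Any]:
                # minimal placeholder fingerprint, mirroring the dummy above
                return {"user_agent": "Mozilla/5.0", "browser_type": browser}

            def random(self) -> str:
                return "Mozilla/5.0"

    agent = LitAgent()
    ua = agent.random()  # behaves the same whether or not webscout is importable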