webscout-8.3.1-py3-none-any.whl → webscout-8.3.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (77)
  1. webscout/AIutel.py +46 -53
  2. webscout/Bing_search.py +418 -0
  3. webscout/Extra/gguf.py +706 -177
  4. webscout/Provider/AISEARCH/genspark_search.py +7 -7
  5. webscout/Provider/GeminiProxy.py +140 -0
  6. webscout/Provider/MCPCore.py +78 -75
  7. webscout/Provider/OPENAI/BLACKBOXAI.py +1 -4
  8. webscout/Provider/OPENAI/GeminiProxy.py +328 -0
  9. webscout/Provider/OPENAI/README.md +2 -0
  10. webscout/Provider/OPENAI/README_AUTOPROXY.md +238 -0
  11. webscout/Provider/OPENAI/__init__.py +15 -1
  12. webscout/Provider/OPENAI/autoproxy.py +332 -39
  13. webscout/Provider/OPENAI/base.py +15 -5
  14. webscout/Provider/OPENAI/e2b.py +0 -1
  15. webscout/Provider/OPENAI/mcpcore.py +109 -70
  16. webscout/Provider/OPENAI/scirachat.py +59 -51
  17. webscout/Provider/OPENAI/toolbaz.py +2 -9
  18. webscout/Provider/OPENAI/xenai.py +514 -0
  19. webscout/Provider/OPENAI/yep.py +8 -2
  20. webscout/Provider/TTI/__init__.py +1 -0
  21. webscout/Provider/TTI/bing.py +231 -0
  22. webscout/Provider/TTS/speechma.py +45 -39
  23. webscout/Provider/TogetherAI.py +366 -0
  24. webscout/Provider/XenAI.py +324 -0
  25. webscout/Provider/__init__.py +8 -3
  26. webscout/Provider/deepseek_assistant.py +378 -0
  27. webscout/auth/__init__.py +44 -0
  28. webscout/auth/api_key_manager.py +189 -0
  29. webscout/auth/auth_system.py +100 -0
  30. webscout/auth/config.py +76 -0
  31. webscout/auth/database.py +400 -0
  32. webscout/auth/exceptions.py +67 -0
  33. webscout/auth/middleware.py +248 -0
  34. webscout/auth/models.py +130 -0
  35. webscout/auth/providers.py +257 -0
  36. webscout/auth/rate_limiter.py +254 -0
  37. webscout/auth/request_models.py +127 -0
  38. webscout/auth/request_processing.py +226 -0
  39. webscout/auth/routes.py +526 -0
  40. webscout/auth/schemas.py +103 -0
  41. webscout/auth/server.py +312 -0
  42. webscout/auth/static/favicon.svg +11 -0
  43. webscout/auth/swagger_ui.py +203 -0
  44. webscout/auth/templates/components/authentication.html +237 -0
  45. webscout/auth/templates/components/base.html +103 -0
  46. webscout/auth/templates/components/endpoints.html +750 -0
  47. webscout/auth/templates/components/examples.html +491 -0
  48. webscout/auth/templates/components/footer.html +75 -0
  49. webscout/auth/templates/components/header.html +27 -0
  50. webscout/auth/templates/components/models.html +286 -0
  51. webscout/auth/templates/components/navigation.html +70 -0
  52. webscout/auth/templates/static/api.js +455 -0
  53. webscout/auth/templates/static/icons.js +168 -0
  54. webscout/auth/templates/static/main.js +784 -0
  55. webscout/auth/templates/static/particles.js +201 -0
  56. webscout/auth/templates/static/styles.css +3353 -0
  57. webscout/auth/templates/static/ui.js +374 -0
  58. webscout/auth/templates/swagger_ui.html +170 -0
  59. webscout/client.py +49 -3
  60. webscout/scout/core/scout.py +104 -26
  61. webscout/scout/element.py +139 -18
  62. webscout/swiftcli/core/cli.py +14 -3
  63. webscout/swiftcli/decorators/output.py +59 -9
  64. webscout/update_checker.py +31 -49
  65. webscout/version.py +1 -1
  66. webscout/webscout_search.py +4 -12
  67. webscout/webscout_search_async.py +3 -10
  68. webscout/yep_search.py +2 -11
  69. {webscout-8.3.1.dist-info → webscout-8.3.2.dist-info}/METADATA +41 -11
  70. {webscout-8.3.1.dist-info → webscout-8.3.2.dist-info}/RECORD +74 -36
  71. {webscout-8.3.1.dist-info → webscout-8.3.2.dist-info}/entry_points.txt +1 -1
  72. webscout/Provider/HF_space/__init__.py +0 -0
  73. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  74. webscout/Provider/OPENAI/api.py +0 -1320
  75. {webscout-8.3.1.dist-info → webscout-8.3.2.dist-info}/WHEEL +0 -0
  76. {webscout-8.3.1.dist-info → webscout-8.3.2.dist-info}/licenses/LICENSE.md +0 -0
  77. {webscout-8.3.1.dist-info → webscout-8.3.2.dist-info}/top_level.txt +0 -0
webscout/Provider/TogetherAI.py
@@ -0,0 +1,366 @@
+ from curl_cffi.requests import Session
+ from curl_cffi import CurlError
+ from typing import Any, Dict, Optional, Generator, Union
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+
+ class TogetherAI(Provider):
+     """
+     A class to interact with the TogetherAI API.
+     """
+
+     AVAILABLE_MODELS = [
+         "Gryphe/MythoMax-L2-13b",
+         "Gryphe/MythoMax-L2-13b-Lite",
+         "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+         "Qwen/QwQ-32B",
+         "Qwen/Qwen2-72B-Instruct",
+         "Qwen/Qwen2-VL-72B-Instruct",
+         "Qwen/Qwen2.5-72B-Instruct-Turbo",
+         "Qwen/Qwen2.5-7B-Instruct-Turbo",
+         "Qwen/Qwen2.5-Coder-32B-Instruct",
+         "Qwen/Qwen2.5-VL-72B-Instruct",
+         "Qwen/Qwen3-235B-A22B-fp8",
+         "Qwen/Qwen3-235B-A22B-fp8-tput",
+         "Rrrr/meta-llama/Llama-3-70b-chat-hf-6f9ad551",
+         "Rrrr/meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo-03dc18e1",
+         "Rrrr/meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo-6c92f39d",
+         "arcee-ai/arcee-blitz",
+         "arcee-ai/caller",
+         "arcee-ai/coder-large",
+         "arcee-ai/maestro-reasoning",
+         "arcee-ai/virtuoso-large",
+         "arcee-ai/virtuoso-medium-v2",
+         "arcee_ai/arcee-spotlight",
+         "blackbox/meta-llama-3-1-8b",
+         "deepseek-ai/DeepSeek-R1",
+         "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
+         "deepseek-ai/DeepSeek-R1-Distill-Llama-70B-free",
+         "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+         "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
+         "deepseek-ai/DeepSeek-V3",
+         "deepseek-ai/DeepSeek-V3-p-dp",
+         "google/gemma-2-27b-it",
+         "google/gemma-2b-it",
+         "lgai/exaone-3-5-32b-instruct",
+         "lgai/exaone-deep-32b",
+         "marin-community/marin-8b-instruct",
+         "meta-llama/Llama-3-70b-chat-hf",
+         "meta-llama/Llama-3-8b-chat-hf",
+         "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
+         "meta-llama/Llama-3.2-3B-Instruct-Turbo",
+         "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
+         "meta-llama/Llama-3.3-70B-Instruct-Turbo",
+         "meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
+         "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+         "meta-llama/Llama-4-Scout-17B-16E-Instruct",
+         "meta-llama/Llama-Vision-Free",
+         "meta-llama/Meta-Llama-3-70B-Instruct-Turbo",
+         "meta-llama/Meta-Llama-3-8B-Instruct-Lite",
+         "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
+         "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
+         "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
+         "mistralai/Mistral-7B-Instruct-v0.1",
+         "mistralai/Mistral-7B-Instruct-v0.2",
+         "mistralai/Mistral-7B-Instruct-v0.3",
+         "mistralai/Mistral-Small-24B-Instruct-2501",
+         "mistralai/Mixtral-8x7B-Instruct-v0.1",
+         "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
+         "perplexity-ai/r1-1776",
+         "roberizk@gmail.com/meta-llama/Llama-3-70b-chat-hf-26ee936b",
+         "roberizk@gmail.com/meta-llama/Meta-Llama-3-70B-Instruct-6feb41f7",
+         "roberizk@gmail.com/meta-llama/Meta-Llama-3-8B-Instruct-8ced8839",
+         "scb10x/scb10x-llama3-1-typhoon2-70b-instruct",
+         "scb10x/scb10x-llama3-1-typhoon2-8b-instruct",
+         "togethercomputer/MoA-1",
+         "togethercomputer/MoA-1-Turbo",
+         "togethercomputer/Refuel-Llm-V2",
+         "togethercomputer/Refuel-Llm-V2-Small",
+     ]
+
+     @staticmethod
+     def _togetherai_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+         """Extracts content from TogetherAI stream JSON objects."""
+         if isinstance(chunk, dict):
+             return chunk.get("choices", [{}])[0].get("delta", {}).get("content")
+         return None
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2049,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "meta-llama/Llama-3.1-8B-Instruct-Turbo",
+         system_prompt: str = "You are a helpful assistant.",
+         browser: str = "chrome"
+     ):
+         """Initializes the TogetherAI API client."""
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.api_endpoint = "https://api.together.xyz/v1/chat/completions"
+         self.activation_endpoint = "https://www.codegeneration.ai/activate-v2"
+
+         # Initialize LitAgent
+         self.agent = LitAgent()
+         self.fingerprint = self.agent.generate_fingerprint(browser)
+
+         # Use the fingerprint for headers
+         self.headers = {
+             "Accept": self.fingerprint["accept"],
+             "Accept-Language": self.fingerprint["accept_language"],
+             "Content-Type": "application/json",
+             "Cache-Control": "no-cache",
+             "Origin": "https://www.codegeneration.ai",
+             "Pragma": "no-cache",
+             "Referer": "https://www.codegeneration.ai/",
+             "Sec-Fetch-Dest": "empty",
+             "Sec-Fetch-Mode": "cors",
+             "Sec-Fetch-Site": "same-site",
+             "User-Agent": self.fingerprint["user_agent"],
+         }
+
+         # Initialize curl_cffi Session
+         self.session = Session()
+         self.session.headers.update(self.headers)
+         self.session.proxies = proxies
+         self.system_prompt = system_prompt
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self._api_key_cache = None
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def refresh_identity(self, browser: str = None):
+         """
+         Refreshes the browser identity fingerprint.
+
+         Args:
+             browser: Specific browser to use for the new fingerprint
+         """
+         browser = browser or self.fingerprint.get("browser_type", "chrome")
+         self.fingerprint = self.agent.generate_fingerprint(browser)
+
+         # Update headers with new fingerprint
+         self.headers.update({
+             "Accept": self.fingerprint["accept"],
+             "Accept-Language": self.fingerprint["accept_language"],
+             "User-Agent": self.fingerprint["user_agent"],
+         })
+
+         # Update session headers
+         self.session.headers.update(self.headers)
+
+         return self.fingerprint
+
+     def get_activation_key(self) -> str:
+         """Get API key from activation endpoint"""
+         if self._api_key_cache:
+             return self._api_key_cache
+
+         try:
+             response = self.session.get(
+                 self.activation_endpoint,
+                 headers={"Accept": "application/json"},
+                 timeout=30
+             )
+             response.raise_for_status()
+             activation_data = response.json()
+             self._api_key_cache = activation_data["openAIParams"]["apiKey"]
+             return self._api_key_cache
+         except Exception as e:
+             raise exceptions.FailedToGenerateResponseError(f"Failed to get activation key: {e}")
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[Dict[str, Any], Generator]:
+         """
+         Sends a prompt to the TogetherAI API and returns the response.
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+         # Get API key if not already set
+         if not self.headers.get("Authorization"):
+             api_key = self.get_activation_key()
+             self.headers["Authorization"] = f"Bearer {api_key}"
+             self.session.headers.update(self.headers)
+
+         # Payload construction
+         payload = {
+             "model": self.model,
+             "messages": [
+                 {"role": "system", "content": self.system_prompt},
+                 {"role": "user", "content": conversation_prompt},
+             ],
+             "stream": stream
+         }
+
+         def for_stream():
+             streaming_text = ""
+             try:
+                 response = self.session.post(
+                     self.api_endpoint,
+                     json=payload,
+                     stream=True,
+                     timeout=self.timeout,
+                     impersonate="chrome110"
+                 )
+                 response.raise_for_status()
+
+                 # Use sanitize_stream
+                 processed_stream = sanitize_stream(
+                     data=response.iter_content(chunk_size=None),
+                     intro_value="data:",
+                     to_json=True,
+                     skip_markers=["[DONE]"],
+                     content_extractor=self._togetherai_extractor,
+                     yield_raw_on_error=False
+                 )
+
+                 for content_chunk in processed_stream:
+                     if content_chunk and isinstance(content_chunk, str):
+                         streaming_text += content_chunk
+                         resp = dict(text=content_chunk)
+                         yield resp if not raw else content_chunk
+
+             except CurlError as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)}") from e
+             finally:
+                 if streaming_text:
+                     self.last_response = {"text": streaming_text}
+                     self.conversation.update_chat_history(prompt, streaming_text)
+
+         def for_non_stream():
+             try:
+                 response = self.session.post(
+                     self.api_endpoint,
+                     json=payload,
+                     timeout=self.timeout,
+                     impersonate="chrome110"
+                 )
+                 response.raise_for_status()
+
+                 response_text = response.text
+
+                 # Use sanitize_stream to parse the non-streaming JSON response
+                 processed_stream = sanitize_stream(
+                     data=response_text,
+                     to_json=True,
+                     intro_value=None,
+                     content_extractor=lambda chunk: chunk.get("choices", [{}])[0].get("message", {}).get("content") if isinstance(chunk, dict) else None,
+                     yield_raw_on_error=False
+                 )
+                 content = next(processed_stream, None)
+                 content = content if isinstance(content, str) else ""
+
+                 self.last_response = {"text": content}
+                 self.conversation.update_chat_history(prompt, content)
+                 return self.last_response if not raw else content
+
+             except CurlError as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+             except Exception as e:
+                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e} - {err_text}") from e
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[str, Generator[str, None, None]]:
+         """Generate response `str`"""
+         def for_stream_chat():
+             gen = self.ask(
+                 prompt, stream=True, raw=False,
+                 optimizer=optimizer, conversationally=conversationally
+             )
+             for response_dict in gen:
+                 yield self.get_message(response_dict)
+
+         def for_non_stream_chat():
+             response_data = self.ask(
+                 prompt, stream=False, raw=False,
+                 optimizer=optimizer, conversationally=conversationally
+             )
+             return self.get_message(response_data)
+
+         return for_stream_chat() if stream else for_non_stream_chat()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response"""
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+
+ if __name__ == "__main__":
+     print("-" * 80)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     for model in TogetherAI.AVAILABLE_MODELS:
+         try:
+             test_ai = TogetherAI(model=model, timeout=60)
+             response = test_ai.chat("Say 'Hello' in one word", stream=True)
+             response_text = ""
+             for chunk in response:
+                 response_text += chunk
+
+             if response_text and len(response_text.strip()) > 0:
+                 status = "✓"
+                 # Clean and truncate response
+                 clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
+                 display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"\r{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             print(f"\r{model:<50} {'✗':<10} {str(e)}")
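The new TogetherAI provider follows webscout's usual Provider interface (ask/chat/get_message) and lazily fetches its bearer token from the activation endpoint on the first request. A minimal usage sketch follows; it is illustrative rather than part of the diff, and assumes the module path shown in the file list above:

    # Illustrative only; module path taken from the file list above.
    from webscout.Provider.TogetherAI import TogetherAI

    # Model name taken from AVAILABLE_MODELS in the diff above.
    ai = TogetherAI(model="meta-llama/Llama-3.1-8B-Instruct-Turbo", timeout=60)

    # Non-streaming: chat() returns the complete reply as a string.
    print(ai.chat("What is the capital of France?"))

    # Streaming: chat(stream=True) yields incremental text chunks.
    for chunk in ai.chat("Write one sentence about the sea.", stream=True):
        print(chunk, end="", flush=True)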
webscout/Provider/XenAI.py
@@ -0,0 +1,324 @@
+ import json
+ import uuid
+ import random
+ import string
+ from typing import Any, Dict, Generator, Union
+ import requests
+ import warnings
+ import urllib3
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+
+ # Suppress only the single InsecureRequestWarning from urllib3 needed for verify=False
+ warnings.filterwarnings("ignore", category=urllib3.exceptions.InsecureRequestWarning)
+
+ class XenAI(Provider):
+
+     # Add more models if known, starting with the one from the example
+     AVAILABLE_MODELS = [
+         "gemini-2.5-pro-preview-05-06",
+         "gemini-2.5-flash-preview-05-20",
+         "o4-mini-high",
+         "grok-3-mini-fast-beta",
+         "grok-3-fast-beta",
+         "gpt-4.1",
+         "o3-high",
+         "gpt-4o-search-preview",
+         "gpt-4o",
+         "claude-sonnet-4-20250514",
+         "claude-sonnet-4-20250514-thinking",
+         "deepseek-ai/DeepSeek-V3-0324",
+         "deepseek-ai/DeepSeek-R1-0528",
+         "groq/deepseek-r1-distill-llama-70b",
+         "deepseek-ai/DeepSeek-Prover-V2-671B",
+         "meta-llama/llama-4-maverick-17b-128e-instruct",
+         "meta-llama/llama-4-scout-17b-16e-instruct",
+         "cognitivecomputations/Dolphin3.0-Mistral-24B",
+         "sonar-pro",
+         "gpt-4o-mini",
+         "gemini-2.0-flash-lite-preview-02-05",
+         "claude-3-7-sonnet-20250219",
+         "claude-3-7-sonnet-20250219-thinking",
+         "claude-opus-4-20250514",
+         "claude-opus-4-20250514-thinking",
+         "chutesai/Llama-4-Maverick-17B-128E-Instruct-FP8",
+         "chutesai/Llama-4-Scout-17B-16E-Instruct",
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2048,
+         timeout: int = 60,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "gemini-2.5-pro-preview-05-06",
+         system_prompt: str = "You are a helpful assistant.",
+     ):
+         """Initializes the xenai API client."""
+         if model not in self.AVAILABLE_MODELS:
+             print(f"Warning: Model '{model}' is not listed in AVAILABLE_MODELS. Proceeding with the provided model.")
+
+         self.api_endpoint = "https://chat.xenai.tech/api/chat/completions"
+
+         self.model = model
+         self.system_prompt = system_prompt
+
+         # Initialize requests Session
+         self.session = requests.Session()
+
+         # Set up headers based on the provided request
+         self.headers = {
+             **LitAgent().generate_fingerprint(),
+             'origin': 'https://chat.xenai.tech',
+             'referer': 'https://chat.xenai.tech/',
+         }
+
+         # Apply headers, proxies, and cookies to the session
+         self.session.headers.update(self.headers)
+         self.session.proxies.update(proxies)
+         # Always disable SSL verification for this session
+         self.session.verify = False
+
+         # Provider settings
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+
+         # Initialize optimizers
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method))
+             and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+         # Token handling: always auto-fetch token, no cookies logic
+         self.token = self._auto_fetch_token()
+
+         # Set the Authorization header for the session
+         self.session.headers.update({
+             'authorization': f'Bearer {self.token}',
+         })
+
+     def _auto_fetch_token(self):
+         """Automatically fetch a token from the signup endpoint using requests."""
+         session = requests.Session()
+         session.verify = False  # Always disable SSL verification for this session
+         def random_string(length=8):
+             return ''.join(random.choices(string.ascii_lowercase, k=length))
+         name = random_string(8)
+         email = f"{name}@gmail.com"
+         password = email
+         profile_image_url = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAYAAABw4pVUAAAAAXNSR0IArs4c6QAAAkRJREFUeF7tmDFOw0AUBdcSiIaKM3CKHIQ7UHEISq5AiUTFHYC0XADoTRsJEZFEjhFIaYAim92fjGFS736/zOTZzjavl0d98oMh0CgE4+IriEJYPhQC86EQhdAIwPL4DFEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg2BCfkAIqwAA94KZ/EAAAAASUVORK5CYII="
+         payload = {
+             "name": name,
+             "email": email,
+             "password": password,
+             "profile_image_url": profile_image_url
+         }
+         headers = {
+             **LitAgent().generate_fingerprint(),
+             'origin': 'https://chat.xenai.tech',
+             'referer': 'https://chat.xenai.tech/auth',
+         }
+         try:
+             resp = session.post(
+                 "https://chat.xenai.tech/api/v1/auths/signup",
+                 headers=headers,
+                 json=payload,
+                 timeout=30,
+                 verify=False  # Disable SSL verification for testing
+             )
+             if resp.ok:
+                 data = resp.json()
+                 token = data.get("token")
+                 if token:
+                     return token
+                 set_cookie = resp.headers.get("set-cookie", "")
+                 if "token=" in set_cookie:
+                     return set_cookie.split("token=")[1].split(";")[0]
+             raise exceptions.FailedToGenerateResponseError(f"Failed to auto-fetch token: {resp.status_code} {resp.text}")
+         except Exception as e:
+             raise exceptions.FailedToGenerateResponseError(f"Token auto-fetch failed: {e}")
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         **kwargs
+     ) -> Union[Dict[str, Any], Generator]:
+         """Sends a prompt to the xenai API and returns the response."""
+
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise exceptions.InvalidOptimizerError(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         chat_id = kwargs.get("chat_id", str(uuid.uuid4()))
+         message_id = str(uuid.uuid4())
+
+         payload = {
+             "stream": stream,
+             "model": self.model,
+             "messages": [
+                 {"role": "system", "content": self.system_prompt},
+                 {"role": "user", "content": conversation_prompt}
+             ],
+             "params": kwargs.get("params", {}),
+             "tool_servers": kwargs.get("tool_servers", []),
+             "features": kwargs.get("features", {"web_search": False}),
+             "chat_id": chat_id,
+             "id": message_id,
+             "stream_options": kwargs.get("stream_options", {"include_usage": True})
+         }
+
+         def for_stream():
+             streaming_text = ""
+             try:
+                 response = self.session.post(
+                     self.api_endpoint,
+                     json=payload,
+                     stream=True,
+                     timeout=self.timeout,
+                     verify=False  # Always disable SSL verification for this request
+                 )
+                 response.raise_for_status()
+
+                 # Use sanitize_stream
+                 processed_stream = sanitize_stream(
+                     data=response.iter_content(chunk_size=None),  # Pass byte iterator
+                     intro_value="data:",
+                     to_json=True,  # Stream sends JSON
+                     skip_markers=["[DONE]"],
+                     content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('delta', {}).get('content') if isinstance(chunk, dict) else None,
+                     yield_raw_on_error=False  # Skip non-JSON or lines where extractor fails
+                 )
+
+                 for content_chunk in processed_stream:
+                     # content_chunk is the string extracted by the content_extractor
+                     if content_chunk and isinstance(content_chunk, str):
+                         streaming_text += content_chunk
+                         yield dict(text=content_chunk) if not raw else content_chunk
+
+                 self.last_response = {"text": streaming_text}
+                 self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
+
+             except requests.RequestException as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed (requests): {e}") from e
+             except Exception as e:
+                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                 raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}") from e
+
+         def for_non_stream():
+             full_text = ""
+             try:
+                 stream_generator = self.ask(
+                     prompt, stream=True, raw=False, optimizer=optimizer, conversationally=conversationally, **kwargs
+                 )
+                 for chunk_data in stream_generator:
+                     if isinstance(chunk_data, dict):
+                         full_text += chunk_data["text"]
+                     elif isinstance(chunk_data, str):
+                         full_text += chunk_data
+             except requests.RequestException as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Failed to aggregate non-stream response (requests): {str(e)}") from e
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Failed to aggregate non-stream response: {str(e)}") from e
+
+             return full_text if raw else self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         **kwargs
+     ) -> Union[str, Generator[str, None, None]]:
+         """Generates a response from the xenai API."""
+
+         def for_stream_chat() -> Generator[str, None, None]:
+             gen = self.ask(
+                 prompt, stream=True, raw=False,
+                 optimizer=optimizer, conversationally=conversationally, **kwargs
+             )
+             for response_dict in gen:
+                 yield self.get_message(response_dict)
+
+         def for_non_stream_chat() -> str:
+             response_data = self.ask(
+                 prompt, stream=False, raw=False,
+                 optimizer=optimizer, conversationally=conversationally, **kwargs
+             )
+             return self.get_message(response_data)
+
+         return for_stream_chat() if stream else for_non_stream_chat()
+
+     def get_message(self, response: Dict[str, Any]) -> str:
+         """Extracts the message from the API response."""
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response.get("text", "")
+
+ # Example usage (no cookies file needed)
+ if __name__ == "__main__":
+     from rich import print
+
+     print("-" * 80)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     for model in XenAI.AVAILABLE_MODELS:
+         try:
+             test_ai = XenAI(model=model, timeout=60)
+             response = test_ai.chat("Say 'Hello' in one word", stream=True)
+             response_text = ""
+             # Accumulate the response text without printing in the loop
+             for chunk in response:
+                 response_text += chunk
+
+             if response_text and len(response_text.strip()) > 0:
+                 status = "✓"
+                 # Truncate response if too long
+                 display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             # Print the final status and response, overwriting the "Testing..." line
+             print(f"\r{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             # Print error, overwriting the "Testing..." line
+             print(f"\r{model:<50} {'✗':<10} {str(e)}")
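The new XenAI provider exposes the same chat/ask interface, but two implementation details are worth noting from the diff: the constructor signs up a throwaway account to auto-fetch a bearer token, and TLS verification is disabled on every request. A minimal usage sketch, again illustrative rather than part of the diff, with the module path taken from the file list above:

    # Illustrative only; module path taken from the file list above.
    from webscout.Provider.XenAI import XenAI

    ai = XenAI(model="gpt-4o-mini", timeout=60)

    # Non-streaming call; the reply is returned as a single string.
    print(ai.chat("Say 'Hello' in one word"))

    # Extra request fields pass through **kwargs; the payload in the diff
    # includes a "features" field, e.g. {"web_search": True} to enable search.
    for chunk in ai.chat("Summarize the plot of Hamlet in one line.",
                         stream=True, features={"web_search": True}):
        print(chunk, end="", flush=True)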