webscout-8.3.1-py3-none-any.whl → webscout-8.3.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (114)
  1. webscout/AIutel.py +180 -78
  2. webscout/Bing_search.py +417 -0
  3. webscout/Extra/gguf.py +706 -177
  4. webscout/Provider/AISEARCH/__init__.py +1 -0
  5. webscout/Provider/AISEARCH/genspark_search.py +7 -7
  6. webscout/Provider/AISEARCH/stellar_search.py +132 -0
  7. webscout/Provider/ExaChat.py +84 -58
  8. webscout/Provider/GeminiProxy.py +140 -0
  9. webscout/Provider/HeckAI.py +85 -80
  10. webscout/Provider/Jadve.py +56 -50
  11. webscout/Provider/MCPCore.py +78 -75
  12. webscout/Provider/MiniMax.py +207 -0
  13. webscout/Provider/Nemotron.py +41 -13
  14. webscout/Provider/Netwrck.py +34 -51
  15. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -4
  16. webscout/Provider/OPENAI/GeminiProxy.py +328 -0
  17. webscout/Provider/OPENAI/MiniMax.py +298 -0
  18. webscout/Provider/OPENAI/README.md +32 -29
  19. webscout/Provider/OPENAI/README_AUTOPROXY.md +238 -0
  20. webscout/Provider/OPENAI/TogetherAI.py +4 -17
  21. webscout/Provider/OPENAI/__init__.py +17 -1
  22. webscout/Provider/OPENAI/autoproxy.py +1067 -39
  23. webscout/Provider/OPENAI/base.py +17 -76
  24. webscout/Provider/OPENAI/deepinfra.py +42 -108
  25. webscout/Provider/OPENAI/e2b.py +0 -1
  26. webscout/Provider/OPENAI/flowith.py +179 -166
  27. webscout/Provider/OPENAI/friendli.py +233 -0
  28. webscout/Provider/OPENAI/mcpcore.py +109 -70
  29. webscout/Provider/OPENAI/monochat.py +329 -0
  30. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  31. webscout/Provider/OPENAI/scirachat.py +59 -51
  32. webscout/Provider/OPENAI/toolbaz.py +3 -9
  33. webscout/Provider/OPENAI/typegpt.py +1 -1
  34. webscout/Provider/OPENAI/utils.py +19 -42
  35. webscout/Provider/OPENAI/x0gpt.py +14 -2
  36. webscout/Provider/OPENAI/xenai.py +514 -0
  37. webscout/Provider/OPENAI/yep.py +8 -2
  38. webscout/Provider/OpenGPT.py +54 -32
  39. webscout/Provider/PI.py +58 -84
  40. webscout/Provider/StandardInput.py +32 -13
  41. webscout/Provider/TTI/README.md +9 -9
  42. webscout/Provider/TTI/__init__.py +3 -1
  43. webscout/Provider/TTI/aiarta.py +92 -78
  44. webscout/Provider/TTI/bing.py +231 -0
  45. webscout/Provider/TTI/infip.py +212 -0
  46. webscout/Provider/TTI/monochat.py +220 -0
  47. webscout/Provider/TTS/speechma.py +45 -39
  48. webscout/Provider/TeachAnything.py +11 -3
  49. webscout/Provider/TextPollinationsAI.py +78 -70
  50. webscout/Provider/TogetherAI.py +350 -0
  51. webscout/Provider/Venice.py +37 -46
  52. webscout/Provider/VercelAI.py +27 -24
  53. webscout/Provider/WiseCat.py +35 -35
  54. webscout/Provider/WrDoChat.py +22 -26
  55. webscout/Provider/WritingMate.py +26 -22
  56. webscout/Provider/XenAI.py +324 -0
  57. webscout/Provider/__init__.py +10 -5
  58. webscout/Provider/deepseek_assistant.py +378 -0
  59. webscout/Provider/granite.py +48 -57
  60. webscout/Provider/koala.py +51 -39
  61. webscout/Provider/learnfastai.py +49 -64
  62. webscout/Provider/llmchat.py +79 -93
  63. webscout/Provider/llmchatco.py +63 -78
  64. webscout/Provider/multichat.py +51 -40
  65. webscout/Provider/oivscode.py +1 -1
  66. webscout/Provider/scira_chat.py +159 -96
  67. webscout/Provider/scnet.py +13 -13
  68. webscout/Provider/searchchat.py +13 -13
  69. webscout/Provider/sonus.py +12 -11
  70. webscout/Provider/toolbaz.py +25 -8
  71. webscout/Provider/turboseek.py +41 -42
  72. webscout/Provider/typefully.py +27 -12
  73. webscout/Provider/typegpt.py +41 -46
  74. webscout/Provider/uncovr.py +55 -90
  75. webscout/Provider/x0gpt.py +33 -17
  76. webscout/Provider/yep.py +79 -96
  77. webscout/auth/__init__.py +55 -0
  78. webscout/auth/api_key_manager.py +189 -0
  79. webscout/auth/auth_system.py +100 -0
  80. webscout/auth/config.py +76 -0
  81. webscout/auth/database.py +400 -0
  82. webscout/auth/exceptions.py +67 -0
  83. webscout/auth/middleware.py +248 -0
  84. webscout/auth/models.py +130 -0
  85. webscout/auth/providers.py +279 -0
  86. webscout/auth/rate_limiter.py +254 -0
  87. webscout/auth/request_models.py +127 -0
  88. webscout/auth/request_processing.py +226 -0
  89. webscout/auth/routes.py +550 -0
  90. webscout/auth/schemas.py +103 -0
  91. webscout/auth/server.py +367 -0
  92. webscout/client.py +121 -70
  93. webscout/litagent/Readme.md +68 -55
  94. webscout/litagent/agent.py +99 -9
  95. webscout/scout/core/scout.py +104 -26
  96. webscout/scout/element.py +139 -18
  97. webscout/swiftcli/core/cli.py +14 -3
  98. webscout/swiftcli/decorators/output.py +59 -9
  99. webscout/update_checker.py +31 -49
  100. webscout/version.py +1 -1
  101. webscout/webscout_search.py +4 -12
  102. webscout/webscout_search_async.py +3 -10
  103. webscout/yep_search.py +2 -11
  104. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/METADATA +141 -99
  105. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/RECORD +109 -83
  106. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/entry_points.txt +1 -1
  107. webscout/Provider/HF_space/__init__.py +0 -0
  108. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  109. webscout/Provider/OPENAI/api.py +0 -1320
  110. webscout/Provider/TTI/fastflux.py +0 -233
  111. webscout/Provider/Writecream.py +0 -246
  112. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/WHEEL +0 -0
  113. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/licenses/LICENSE.md +0 -0
  114. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/top_level.txt +0 -0
webscout/Provider/WiseCat.py
@@ -106,7 +106,6 @@ class WiseCat(Provider):
                 raise Exception(
                     f"Optimizer is not one of {self.__available_optimizers}"
                 )
-
         payload = {
             "id": "ephemeral",
             "messages": [
@@ -121,52 +120,49 @@ class WiseCat(Provider):
             ],
             "selectedChatModel": self.model
         }
-
         def for_stream():
-            try: # Add try block for CurlError
-                # Use curl_cffi session post with impersonate
+            try:
                 response = self.session.post(
                     self.api_endpoint,
                     headers=self.headers,
                     json=payload,
                     stream=True,
                     timeout=self.timeout,
-                    impersonate="chrome120" # Add impersonate
+                    impersonate="chrome120"
                 )
                 if not response.ok:
                     error_msg = f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                     raise exceptions.FailedToGenerateResponseError(error_msg)
-
                 streaming_text = ""
-                # Use sanitize_stream with the custom extractor
                 processed_stream = sanitize_stream(
-                    data=response.iter_content(chunk_size=None), # Pass byte iterator
-                    intro_value=None, # No simple prefix to remove here
-                    to_json=False, # Content is not JSON
-                    content_extractor=self._wisecat_extractor # Use the specific extractor
+                    data=response.iter_content(chunk_size=None),
+                    intro_value=None,
+                    to_json=False,
+                    content_extractor=self._wisecat_extractor,
+                    raw=raw
                 )
-
                 for content_chunk in processed_stream:
-                    if content_chunk and isinstance(content_chunk, str):
-                        streaming_text += content_chunk
-                        yield content_chunk if raw else dict(text=content_chunk)
-
-                self.last_response.update(dict(text=streaming_text)) # Use streaming_text here
+                    # Always yield as string, even in raw mode
+                    if isinstance(content_chunk, bytes):
+                        content_chunk = content_chunk.decode('utf-8', errors='ignore')
+                    if raw:
+                        yield content_chunk
+                    else:
+                        if content_chunk and isinstance(content_chunk, str):
+                            streaming_text += content_chunk
+                            yield dict(text=content_chunk)
+                self.last_response.update(dict(text=streaming_text))
                 self.conversation.update_chat_history(
                     prompt, self.get_message(self.last_response)
                 )
-            except CurlError as e: # Catch CurlError
+            except CurlError as e:
                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
-            except Exception as e: # Catch other potential exceptions
+            except Exception as e:
                 raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
-
-
         def for_non_stream():
-            # This function implicitly uses the updated for_stream
            for _ in for_stream():
                pass
            return self.last_response
-
        return for_stream() if stream else for_non_stream()
 
    def chat(
@@ -175,24 +171,28 @@ class WiseCat(Provider):
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
+        raw: bool = False, # Added raw parameter
    ) -> str:
-        """Generate response `str`"""
        def for_stream():
            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
+                prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally
            ):
-                yield self.get_message(response)
-
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
+            result = self.ask(
+                prompt,
+                False,
+                raw=raw,
+                optimizer=optimizer,
+                conversationally=conversationally,
            )
-
+            if raw:
+                return result
+            else:
+                return self.get_message(result)
        return for_stream() if stream else for_non_stream()
 
    def get_message(self, response: dict) -> str:
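The change that recurs here and in the WrDoChat and WritingMate hunks below is that the new raw parameter is now threaded from chat() through ask() into sanitize_stream, so callers can receive either plain string chunks or the usual {"text": ...} dictionaries. A minimal sketch of the two calling styles against the updated interface; the import path and prompts are illustrative assumptions, not taken from the diff:

# Sketch only: assumes WiseCat is importable from webscout.Provider
# and behaves as the diff above describes.
from webscout.Provider import WiseCat

ai = WiseCat()

# Default mode: ask() yields {"text": ...} dicts and chat() unwraps them.
for piece in ai.chat("Hello there", stream=True):
    print(piece, end="", flush=True)

# New in 8.3.3: raw=True passes chunks through unchanged (decoded to str
# even when the transport yields bytes), skipping get_message() entirely.
for piece in ai.chat("Hello there", stream=True, raw=True):
    print(piece, end="", flush=True)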
webscout/Provider/WrDoChat.py
@@ -244,7 +244,6 @@ class WrDoChat(Provider):
        def for_stream():
            try:
                self.headers["referer"] = f"https://oi.wr.do/chat/{chat_id}"
-
                response = self.session.post(
                    self.api_endpoint,
                    json=payload,
@@ -252,31 +251,27 @@ class WrDoChat(Provider):
                    timeout=self.timeout,
                    impersonate="chrome110"
                )
-
                if response.status_code == 401:
                    raise exceptions.AuthenticationError("Authentication failed. Please check your cookies.")
-
                response.raise_for_status()
-
                streaming_response = ""
                has_content = False
-
-                # Use sanitize_stream with the custom extractor
                processed_stream = sanitize_stream(
                    data=response.iter_lines(),
                    intro_value=None, # No intro to remove
                    to_json=False, # Response is not JSON
                    content_extractor=self._wrdo_extractor,
-                    yield_raw_on_error=False
+                    yield_raw_on_error=False,
+                    raw=raw
                )
-
                for content in processed_stream:
+                    # Always yield as string, even in raw mode
+                    if isinstance(content, bytes):
+                        content = content.decode('utf-8', errors='ignore')
                    if content and isinstance(content, str):
                        streaming_response += content
                        has_content = True
-                        yield {"text": content} if not raw else content
-
-                # Only update conversation history if we received content
+                        yield content if raw else {"text": content}
                if has_content:
                    self.last_response = {"text": streaming_response}
                    self.conversation.update_chat_history(
@@ -286,12 +281,10 @@ class WrDoChat(Provider):
                    raise exceptions.FailedToGenerateResponseError(
                        "No content received from API"
                    )
-
            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"An error occurred: {str(e)}")
-
        def for_non_stream():
            response_text = ""
            try:
@@ -303,9 +296,7 @@ class WrDoChat(Provider):
            except Exception as e:
                if not response_text:
                    raise exceptions.FailedToGenerateResponseError(f"Failed to get response: {str(e)}")
-
            return response_text if raw else {"text": response_text}
-
        return for_stream() if stream else for_non_stream()
 
    def chat(
@@ -314,6 +305,7 @@ class WrDoChat(Provider):
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
+        raw: bool = False, # Added raw parameter
    ) -> Union[str, Generator[str, None, None]]:
        """
        Generate a response to a prompt.
@@ -329,20 +321,24 @@ class WrDoChat(Provider):
        """
        def for_stream():
            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
+                prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally
            ):
-                yield self.get_message(response)
-
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
+            result = self.ask(
+                prompt,
+                False,
+                raw=raw,
+                optimizer=optimizer,
+                conversationally=conversationally,
            )
-
+            if raw:
+                return result
+            else:
+                return self.get_message(result)
        return for_stream() if stream else for_non_stream()
 
    def get_message(self, response: dict) -> str:
webscout/Provider/WritingMate.py
@@ -178,13 +178,20 @@ class WritingMate(Provider):
                    data=response.iter_content(chunk_size=None), # Pass byte iterator
                    intro_value=None, # No simple prefix
                    to_json=False, # Content is not JSON
-                    content_extractor=self._writingmate_extractor # Use the specific extractor
+                    content_extractor=self._writingmate_extractor, # Use the specific extractor
+                    raw=raw
                )
 
                for content_chunk in processed_stream:
-                    if content_chunk and isinstance(content_chunk, str):
-                        streaming_text += content_chunk
-                        yield content_chunk if raw else dict(text=content_chunk)
+                    # Always yield as string, even in raw mode
+                    if isinstance(content_chunk, bytes):
+                        content_chunk = content_chunk.decode('utf-8', errors='ignore')
+                    if raw:
+                        yield content_chunk
+                    else:
+                        if content_chunk and isinstance(content_chunk, str):
+                            streaming_text += content_chunk
+                            yield dict(text=content_chunk)
 
                self.last_response.update(dict(text=streaming_text))
                self.conversation.update_chat_history(
@@ -196,12 +203,10 @@ class WritingMate(Provider):
                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
 
        def for_non_stream():
-            # This function implicitly uses the updated for_stream
            for _ in for_stream():
                pass
            return self.last_response
 
-        # Ensure stream defaults to True if not provided, matching original behavior
        effective_stream = stream if stream is not None else True
        return for_stream() if effective_stream else for_non_stream()
 
@@ -210,36 +215,35 @@ class WritingMate(Provider):
        prompt: str,
        stream: bool = False, # Default stream to False as per original chat method
        optimizer: str = None,
-        conversationally: bool = False
+        conversationally: bool = False,
+        raw: bool = False, # Added raw parameter
    ) -> Union[str, Generator[str,None,None]]:
        if stream:
-            # yield decoded text chunks
            def text_stream():
-                # Call ask with stream=True, raw=False to get dicts
-                for response_dict in self.ask(
-                    prompt, stream=True, raw=False,
+                for response in self.ask(
+                    prompt, stream=True, raw=raw,
                    optimizer=optimizer, conversationally=conversationally
                ):
-                    # Extract text from dict
-                    yield self.get_message(response_dict)
+                    if raw:
+                        yield response
+                    else:
+                        yield self.get_message(response)
            return text_stream()
-        else: # non-stream: return aggregated text
-            # Call ask with stream=False, raw=False
+        else:
            response_data = self.ask(
                prompt,
                stream=False,
-                raw=False,
+                raw=raw,
                optimizer=optimizer,
                conversationally=conversationally,
            )
-            # Ensure response_data is a dict before passing to get_message
+            if raw:
+                return response_data
            if isinstance(response_data, dict):
-                return self.get_message(response_data)
+                return self.get_message(response_data)
            else:
-                # Handle unexpected generator case if ask(stream=False) behaves differently
-                # This part might need adjustment based on actual behavior
-                full_text = "".join(self.get_message(chunk) for chunk in response_data if isinstance(chunk, dict))
-                return full_text
+                full_text = "".join(self.get_message(chunk) for chunk in response_data if isinstance(chunk, dict))
+                return full_text
 
 
    def get_message(self, response: dict) -> str:
webscout/Provider/XenAI.py
@@ -0,0 +1,324 @@
+import json
+import uuid
+import random
+import string
+from typing import Any, Dict, Generator, Union
+import requests
+import warnings
+import urllib3
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.litagent import LitAgent
+
+# Suppress only the single InsecureRequestWarning from urllib3 needed for verify=False
+warnings.filterwarnings("ignore", category=urllib3.exceptions.InsecureRequestWarning)
+
+class XenAI(Provider):
+
+    # Add more models if known, starting with the one from the example
+    AVAILABLE_MODELS = [
+        "gemini-2.5-pro-preview-05-06",
+        "gemini-2.5-flash-preview-05-20",
+        "o4-mini-high",
+        "grok-3-mini-fast-beta",
+        "grok-3-fast-beta",
+        "gpt-4.1",
+        "o3-high",
+        "gpt-4o-search-preview",
+        "gpt-4o",
+        "claude-sonnet-4-20250514",
+        "claude-sonnet-4-20250514-thinking",
+        "deepseek-ai/DeepSeek-V3-0324",
+        "deepseek-ai/DeepSeek-R1-0528",
+        "groq/deepseek-r1-distill-llama-70b",
+        "deepseek-ai/DeepSeek-Prover-V2-671B",
+        "meta-llama/llama-4-maverick-17b-128e-instruct",
+        "meta-llama/llama-4-scout-17b-16e-instruct",
+        "cognitivecomputations/Dolphin3.0-Mistral-24B",
+        "sonar-pro",
+        "gpt-4o-mini",
+        "gemini-2.0-flash-lite-preview-02-05",
+        "claude-3-7-sonnet-20250219",
+        "claude-3-7-sonnet-20250219-thinking",
+        "claude-opus-4-20250514",
+        "claude-opus-4-20250514-thinking",
+        "chutesai/Llama-4-Maverick-17B-128E-Instruct-FP8",
+        "chutesai/Llama-4-Scout-17B-16E-Instruct",
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2048,
+        timeout: int = 60,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "gemini-2.5-pro-preview-05-06",
+        system_prompt: str = "You are a helpful assistant.",
+    ):
+        """Initializes the xenai API client."""
+        if model not in self.AVAILABLE_MODELS:
+            print(f"Warning: Model '{model}' is not listed in AVAILABLE_MODELS. Proceeding with the provided model.")
+
+        self.api_endpoint = "https://chat.xenai.tech/api/chat/completions"
+
+        self.model = model
+        self.system_prompt = system_prompt
+
+        # Initialize requests Session
+        self.session = requests.Session()
+
+        # Set up headers based on the provided request
+        self.headers = {
+            **LitAgent().generate_fingerprint(),
+            'origin': 'https://chat.xenai.tech',
+            'referer': 'https://chat.xenai.tech/',
+        }
+
+        # Apply headers, proxies, and cookies to the session
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+        # Always disable SSL verification for this session
+        self.session.verify = False
+
+        # Provider settings
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+
+        # Initialize optimizers
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method))
+            and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+        # Token handling: always auto-fetch token, no cookies logic
+        self.token = self._auto_fetch_token()
+
+        # Set the Authorization header for the session
+        self.session.headers.update({
+            'authorization': f'Bearer {self.token}',
+        })
+
+    def _auto_fetch_token(self):
+        """Automatically fetch a token from the signup endpoint using requests."""
+        session = requests.Session()
+        session.verify = False  # Always disable SSL verification for this session
+        def random_string(length=8):
+            return ''.join(random.choices(string.ascii_lowercase, k=length))
+        name = random_string(8)
+        email = f"{name}@gmail.com"
+        password = email
+        profile_image_url = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAYAAABw4pVUAAAAAXNSR0IArs4c6QAAAkRJREFUeF7tmDFOw0AUBdcSiIaKM3CKHIQ7UHEISq5AiUTFHYC0XADoTRsJEZFEjhFIaYAim92fjGFS736/zOTZzjavl0d98oMh0CgE4+IriEJYPhQC86EQhdAIwPL4DFEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg2BCfkAIqwAA94KZ/EAAAAASUVORK5CYII="
+        payload = {
+            "name": name,
+            "email": email,
+            "password": password,
+            "profile_image_url": profile_image_url
+        }
+        headers = {
+            **LitAgent().generate_fingerprint(),
+            'origin': 'https://chat.xenai.tech',
+            'referer': 'https://chat.xenai.tech/auth',
+        }
+        try:
+            resp = session.post(
+                "https://chat.xenai.tech/api/v1/auths/signup",
+                headers=headers,
+                json=payload,
+                timeout=30,
+                verify=False  # Disable SSL verification for testing
+            )
+            if resp.ok:
+                data = resp.json()
+                token = data.get("token")
+                if token:
+                    return token
+            set_cookie = resp.headers.get("set-cookie", "")
+            if "token=" in set_cookie:
+                return set_cookie.split("token=")[1].split(";")[0]
+            raise exceptions.FailedToGenerateResponseError(f"Failed to auto-fetch token: {resp.status_code} {resp.text}")
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"Token auto-fetch failed: {e}")
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        **kwargs
+    ) -> Union[Dict[str, Any], Generator]:
+        """Sends a prompt to the xenai API and returns the response."""
+
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise exceptions.InvalidOptimizerError(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        chat_id = kwargs.get("chat_id", str(uuid.uuid4()))
+        message_id = str(uuid.uuid4())
+
+        payload = {
+            "stream": stream,
+            "model": self.model,
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt}
+            ],
+            "params": kwargs.get("params", {}),
+            "tool_servers": kwargs.get("tool_servers", []),
+            "features": kwargs.get("features", {"web_search": False}),
+            "chat_id": chat_id,
+            "id": message_id,
+            "stream_options": kwargs.get("stream_options", {"include_usage": True})
+        }
+
+        def for_stream():
+            streaming_text = ""
+            try:
+                response = self.session.post(
+                    self.api_endpoint,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    verify=False  # Always disable SSL verification for this request
+                )
+                response.raise_for_status()
+
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None),  # Pass byte iterator
+                    intro_value="data:",
+                    to_json=True,  # Stream sends JSON
+                    skip_markers=["[DONE]"],
+                    content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('delta', {}).get('content') if isinstance(chunk, dict) else None,
+                    yield_raw_on_error=False  # Skip non-JSON or lines where extractor fails
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by the content_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield dict(text=content_chunk) if not raw else content_chunk
+
+                self.last_response = {"text": streaming_text}
+                self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
+
+            except requests.RequestException as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (requests): {e}") from e
+            except Exception as e:
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}") from e
+
+        def for_non_stream():
+            full_text = ""
+            try:
+                stream_generator = self.ask(
+                    prompt, stream=True, raw=False, optimizer=optimizer, conversationally=conversationally, **kwargs
+                )
+                for chunk_data in stream_generator:
+                    if isinstance(chunk_data, dict):
+                        full_text += chunk_data["text"]
+                    elif isinstance(chunk_data, str):
+                        full_text += chunk_data
+            except requests.RequestException as e:
+                raise exceptions.FailedToGenerateResponseError(f"Failed to aggregate non-stream response (requests): {str(e)}") from e
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Failed to aggregate non-stream response: {str(e)}") from e
+
+            return full_text if raw else self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        **kwargs
+    ) -> Union[str, Generator[str, None, None]]:
+        """Generates a response from the xenai API."""
+
+        def for_stream_chat() -> Generator[str, None, None]:
+            gen = self.ask(
+                prompt, stream=True, raw=False,
+                optimizer=optimizer, conversationally=conversationally, **kwargs
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict)
+
+        def for_non_stream_chat() -> str:
+            response_data = self.ask(
+                prompt, stream=False, raw=False,
+                optimizer=optimizer, conversationally=conversationally, **kwargs
+            )
+            return self.get_message(response_data)
+
+        return for_stream_chat() if stream else for_non_stream_chat()
+
+    def get_message(self, response: Dict[str, Any]) -> str:
+        """Extracts the message from the API response."""
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response.get("text", "")
+
+# Example usage (no cookies file needed)
+if __name__ == "__main__":
+    from rich import print
+
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in XenAI.AVAILABLE_MODELS:
+        try:
+            test_ai = XenAI(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            # Accumulate the response text without printing in the loop
+            for chunk in response:
+                response_text += chunk
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            # Print the final status and response, overwriting the "Testing..." line
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            # Print error, overwriting the "Testing..." line
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")