webscout-8.3.1-py3-none-any.whl → webscout-8.3.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (77):
  1. webscout/AIutel.py +46 -53
  2. webscout/Bing_search.py +418 -0
  3. webscout/Extra/gguf.py +706 -177
  4. webscout/Provider/AISEARCH/genspark_search.py +7 -7
  5. webscout/Provider/GeminiProxy.py +140 -0
  6. webscout/Provider/MCPCore.py +78 -75
  7. webscout/Provider/OPENAI/BLACKBOXAI.py +1 -4
  8. webscout/Provider/OPENAI/GeminiProxy.py +328 -0
  9. webscout/Provider/OPENAI/README.md +2 -0
  10. webscout/Provider/OPENAI/README_AUTOPROXY.md +238 -0
  11. webscout/Provider/OPENAI/__init__.py +15 -1
  12. webscout/Provider/OPENAI/autoproxy.py +332 -39
  13. webscout/Provider/OPENAI/base.py +15 -5
  14. webscout/Provider/OPENAI/e2b.py +0 -1
  15. webscout/Provider/OPENAI/mcpcore.py +109 -70
  16. webscout/Provider/OPENAI/scirachat.py +59 -51
  17. webscout/Provider/OPENAI/toolbaz.py +2 -9
  18. webscout/Provider/OPENAI/xenai.py +514 -0
  19. webscout/Provider/OPENAI/yep.py +8 -2
  20. webscout/Provider/TTI/__init__.py +1 -0
  21. webscout/Provider/TTI/bing.py +231 -0
  22. webscout/Provider/TTS/speechma.py +45 -39
  23. webscout/Provider/TogetherAI.py +366 -0
  24. webscout/Provider/XenAI.py +324 -0
  25. webscout/Provider/__init__.py +8 -3
  26. webscout/Provider/deepseek_assistant.py +378 -0
  27. webscout/auth/__init__.py +44 -0
  28. webscout/auth/api_key_manager.py +189 -0
  29. webscout/auth/auth_system.py +100 -0
  30. webscout/auth/config.py +76 -0
  31. webscout/auth/database.py +400 -0
  32. webscout/auth/exceptions.py +67 -0
  33. webscout/auth/middleware.py +248 -0
  34. webscout/auth/models.py +130 -0
  35. webscout/auth/providers.py +257 -0
  36. webscout/auth/rate_limiter.py +254 -0
  37. webscout/auth/request_models.py +127 -0
  38. webscout/auth/request_processing.py +226 -0
  39. webscout/auth/routes.py +526 -0
  40. webscout/auth/schemas.py +103 -0
  41. webscout/auth/server.py +312 -0
  42. webscout/auth/static/favicon.svg +11 -0
  43. webscout/auth/swagger_ui.py +203 -0
  44. webscout/auth/templates/components/authentication.html +237 -0
  45. webscout/auth/templates/components/base.html +103 -0
  46. webscout/auth/templates/components/endpoints.html +750 -0
  47. webscout/auth/templates/components/examples.html +491 -0
  48. webscout/auth/templates/components/footer.html +75 -0
  49. webscout/auth/templates/components/header.html +27 -0
  50. webscout/auth/templates/components/models.html +286 -0
  51. webscout/auth/templates/components/navigation.html +70 -0
  52. webscout/auth/templates/static/api.js +455 -0
  53. webscout/auth/templates/static/icons.js +168 -0
  54. webscout/auth/templates/static/main.js +784 -0
  55. webscout/auth/templates/static/particles.js +201 -0
  56. webscout/auth/templates/static/styles.css +3353 -0
  57. webscout/auth/templates/static/ui.js +374 -0
  58. webscout/auth/templates/swagger_ui.html +170 -0
  59. webscout/client.py +49 -3
  60. webscout/scout/core/scout.py +104 -26
  61. webscout/scout/element.py +139 -18
  62. webscout/swiftcli/core/cli.py +14 -3
  63. webscout/swiftcli/decorators/output.py +59 -9
  64. webscout/update_checker.py +31 -49
  65. webscout/version.py +1 -1
  66. webscout/webscout_search.py +4 -12
  67. webscout/webscout_search_async.py +3 -10
  68. webscout/yep_search.py +2 -11
  69. {webscout-8.3.1.dist-info → webscout-8.3.2.dist-info}/METADATA +41 -11
  70. {webscout-8.3.1.dist-info → webscout-8.3.2.dist-info}/RECORD +74 -36
  71. {webscout-8.3.1.dist-info → webscout-8.3.2.dist-info}/entry_points.txt +1 -1
  72. webscout/Provider/HF_space/__init__.py +0 -0
  73. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  74. webscout/Provider/OPENAI/api.py +0 -1320
  75. {webscout-8.3.1.dist-info → webscout-8.3.2.dist-info}/WHEEL +0 -0
  76. {webscout-8.3.1.dist-info → webscout-8.3.2.dist-info}/licenses/LICENSE.md +0 -0
  77. {webscout-8.3.1.dist-info → webscout-8.3.2.dist-info}/top_level.txt +0 -0
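
The largest single change is a new OpenAI-compatible provider. Going by the +514 line count in the list above, the hunk below is the new file webscout/Provider/OPENAI/xenai.py, reproduced in full: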
@@ -0,0 +1,514 @@
+import time
+import uuid
+import json
+import random
+import string
+from typing import List, Dict, Optional, Union, Generator, Any
+import warnings
+
+# Use curl_cffi for requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
+import urllib3
+
+# Import base classes and utility structures
+from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from webscout.Provider.OPENAI.utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage
+)
+warnings.filterwarnings("ignore", category=urllib3.exceptions.InsecureRequestWarning)
+# Import LitAgent for user agent generation
+try:
+    from webscout.litagent import LitAgent
+except ImportError:
+    # Define a dummy LitAgent if webscout is not installed or accessible
+    class LitAgent:
+        def random(self) -> str:
+            # Return a default user agent if LitAgent is unavailable
+            print("Warning: LitAgent not found. Using default minimal headers.")
+            return "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
+
+# ANSI escape codes for formatting
+BOLD = "\033[1m"
+RED = "\033[91m"
+RESET = "\033[0m"
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'XenAI'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = None,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation using XenAI API.
+        Mimics openai.chat.completions.create
+        """
+        if model not in self._client.AVAILABLE_MODELS:
+            raise ValueError(f"Model '{model}' not supported by XenAI. Available: {self._client.AVAILABLE_MODELS}")
+
+        # Construct the XenAI-specific payload
+        payload = {
+            "stream": stream,
+            "model": model,
+            "messages": messages,
+            "params": kwargs.get("params", {}),
+            "tool_servers": kwargs.get("tool_servers", []),
+            "features": kwargs.get("features", {"web_search": False}),
+            "chat_id": kwargs.get("chat_id", str(uuid.uuid4())),
+            "id": str(uuid.uuid4()),  # Message ID
+            "stream_options": kwargs.get("stream_options", {"include_usage": True})
+        }
+
+        # Add optional OpenAI params to XenAI's 'params' field if provided
+        if temperature is not None: payload["params"]["temperature"] = temperature
+        if top_p is not None: payload["params"]["top_p"] = top_p
+        if max_tokens is not None: payload["params"]["max_tokens"] = max_tokens
+
+        # Generate standard OpenAI-compatible IDs
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        if stream:
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
+        else:
+            return self._create_non_stream_from_stream(request_id, created_time, model, payload, timeout, proxies)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        """Handles the streaming response from XenAI."""
+        final_usage_data = None  # To store usage if received
+        try:
+            response = self._client.session.post(
+                self._client.api_endpoint,
+                headers=self._client.headers,
+                json=payload,
+                stream=True,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None),
+                impersonate="chrome110",
+                verify=False
+            )
+
+            if not response.ok:
+                try:
+                    error_text = response.text
+                except Exception:
+                    error_text = "<Failed to read error response>"
+                raise IOError(
+                    f"XenAI API Error: {response.status_code} {response.reason} - {error_text}"
+                )
+
+            for line_bytes in response.iter_lines():
+                if line_bytes:
+                    try:
+                        line = line_bytes.decode('utf-8').strip()
+                        if line.startswith("data: "):
+                            json_str = line[6:]
+                            if json_str == "[DONE]":
+                                break  # End of stream signal
+
+                            json_data = json.loads(json_str)
+
+                            # Check for usage data in the chunk (often comes near the end)
+                            if 'usage' in json_data and json_data['usage']:
+                                final_usage_data = json_data['usage']
+                                # Don't yield a chunk just for usage, wait for content or final chunk
+
+                            if 'choices' in json_data and len(json_data['choices']) > 0:
+                                choice_data = json_data['choices'][0]
+                                delta_data = choice_data.get('delta', {})
+                                finish_reason = choice_data.get('finish_reason')
+                                content = delta_data.get('content')
+                                role = delta_data.get('role', 'assistant')  # Default role
+
+                                # Only yield chunks with content or finish reason
+                                if content is not None or finish_reason:
+                                    delta = ChoiceDelta(content=content, role=role)
+                                    choice = Choice(index=0, delta=delta, finish_reason=finish_reason)
+                                    chunk = ChatCompletionChunk(
+                                        id=request_id,
+                                        choices=[choice],
+                                        created=created_time,
+                                        model=model,
+                                        system_fingerprint=json_data.get('system_fingerprint')
+                                    )
+                                    yield chunk
+
+                    except (json.JSONDecodeError, UnicodeDecodeError):
+                        print(f"{RED}Warning: Could not decode JSON line: {line}{RESET}")
+                        continue
+                    except Exception as e:
+                        print(f"{RED}Error processing stream line: {e} - Line: {line}{RESET}")
+                        continue
+
+            # Final chunk to ensure stream termination is signaled correctly
+            # (even if [DONE] was received, this confirms the generator end)
+            final_delta = ChoiceDelta()
+            # Include usage in the final chunk if available
+            usage_obj = None
+            if final_usage_data:
+                usage_obj = CompletionUsage(
+                    prompt_tokens=final_usage_data.get('prompt_tokens', 0),
+                    completion_tokens=final_usage_data.get('completion_tokens', 0),
+                    total_tokens=final_usage_data.get('total_tokens', 0),
+                )
+
+            final_choice = Choice(index=0, delta=final_delta, finish_reason="stop")
+            final_chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[final_choice],
+                created=created_time,
+                model=model,
+                # system_fingerprint=...,  # Can be added if available in final event
+            )
+            # Add usage to the final chunk dictionary representation if available
+            if hasattr(final_chunk, "model_dump"):
+                final_chunk_dict = final_chunk.model_dump(exclude_none=True)
+            else:
+                final_chunk_dict = final_chunk.dict(exclude_none=True)
+            if usage_obj:
+                if hasattr(usage_obj, "model_dump"):
+                    final_chunk_dict["usage"] = usage_obj.model_dump(exclude_none=True)
+                else:
+                    final_chunk_dict["usage"] = usage_obj.dict(exclude_none=True)
+
+            # Yield the final dictionary or object as needed by downstream consumers
+            # Yielding the object aligns better with the generator type hint
+            yield final_chunk
+
+
+        except CurlError as e:
+            print(f"{RED}CurlError during XenAI stream request: {e}{RESET}")
+            raise IOError(f"XenAI request failed due to network/curl issue: {e}") from e
+        except Exception as e:
+            print(f"{RED}Unexpected error during XenAI stream: {e}{RESET}")
+            error_details = ""
+            if hasattr(e, 'response') and e.response is not None:
+                error_details = f" - Status: {e.response.status_code}, Response: {e.response.text}"
+            raise IOError(f"XenAI stream processing failed: {e}{error_details}") from e
+
+    def _create_non_stream_from_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> ChatCompletion:
+        """Handles the non-streaming response by making a single POST request (like deepinfra)."""
+        try:
+            # Ensure stream is False for non-streaming
+            payload = dict(payload)
+            payload["stream"] = False
+
+            response = self._client.session.post(
+                self._client.api_endpoint,
+                headers=self._client.headers,
+                json=payload,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None),
+                impersonate="chrome110",
+                verify=False
+            )
+            if not response.ok:
+                try:
+                    error_text = response.text
+                except Exception:
+                    error_text = "<Failed to read error response>"
+                raise IOError(
+                    f"XenAI API Error: {response.status_code} {response.reason} - {error_text}"
+                )
+
+            data = response.json()
+            choices_data = data.get('choices', [])
+            usage_data = data.get('usage', {})
+
+            choices = []
+            for choice_d in choices_data:
+                message_d = choice_d.get('message', {})
+                message = ChatCompletionMessage(
+                    role=message_d.get('role', 'assistant'),
+                    content=message_d.get('content', '')
+                )
+                choice = Choice(
+                    index=choice_d.get('index', 0),
+                    message=message,
+                    finish_reason=choice_d.get('finish_reason', 'stop')
+                )
+                choices.append(choice)
+
+            usage = CompletionUsage(
+                prompt_tokens=usage_data.get('prompt_tokens', 0),
+                completion_tokens=usage_data.get('completion_tokens', 0),
+                total_tokens=usage_data.get('total_tokens', 0)
+            )
+
+            completion = ChatCompletion(
+                id=request_id,
+                choices=choices,
+                created=created_time,
+                model=data.get('model', model),
+                usage=usage,
+            )
+            return completion
+
+        except CurlError as e:
+            print(f"{RED}CurlError during XenAI non-stream request: {e}{RESET}")
+            raise IOError(f"XenAI request failed due to network/curl issue: {e}") from e
+        except Exception as e:
+            print(f"{RED}Unexpected error during XenAI non-stream: {e}{RESET}")
+            error_details = ""
+            if hasattr(e, 'response') and e.response is not None:
+                error_details = f" - Status: {e.response.status_code}, Response: {e.response.text}"
+            raise IOError(f"XenAI non-stream processing failed: {e}{error_details}") from e
+
+
+class Chat(BaseChat):
+    def __init__(self, client: 'XenAI'):
+        self.completions = Completions(client)
+
+
+class XenAI(OpenAICompatibleProvider):
+    """
+    OpenAI-compatible client for the XenAI API (chat.XenAI.xyz).
+
+    Usage:
+        client = XenAI()
+        response = client.chat.completions.create(
+            model="google/gemma-7b-it",
+            messages=[{"role": "user", "content": "Hello!"}]
+        )
+        print(response.choices[0].message.content)
+    """
+
+    AVAILABLE_MODELS = [
+        "gemini-2.5-pro-preview-05-06",
+        "gemini-2.5-flash-preview-05-20",
+        "o4-mini-high",
+        "grok-3-mini-fast-beta",
+        "grok-3-fast-beta",
+        "gpt-4.1",
+        "o3-high",
+        "gpt-4o-search-preview",
+        "gpt-4o",
+        "claude-sonnet-4-20250514",
+        "claude-sonnet-4-20250514-thinking",
+        "deepseek-ai/DeepSeek-V3-0324",
+        "deepseek-ai/DeepSeek-R1-0528",
+        "groq/deepseek-r1-distill-llama-70b",
+        "deepseek-ai/DeepSeek-Prover-V2-671B",
+        "meta-llama/llama-4-maverick-17b-128e-instruct",
+        "meta-llama/llama-4-scout-17b-16e-instruct",
+        "cognitivecomputations/Dolphin3.0-Mistral-24B",
+        "sonar-pro",
+        "gpt-4o-mini",
+        "gemini-2.0-flash-lite-preview-02-05",
+        "claude-3-7-sonnet-20250219",
+        "claude-3-7-sonnet-20250219-thinking",
+        "claude-opus-4-20250514",
+        "claude-opus-4-20250514-thinking",
+        "chutesai/Llama-4-Maverick-17B-128E-Instruct-FP8",
+        "chutesai/Llama-4-Scout-17B-16E-Instruct",
+    ]
+
+    def _auto_fetch_token(self):
+        """Automatically fetch a token from the signup endpoint."""
+        session = Session()
+        def random_string(length=8):
+            return ''.join(random.choices(string.ascii_lowercase, k=length))
+        name = random_string(6)
+        email = f"{random_string(8)}@gmail.com"
+        password = email
+        profile_image_url = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAYAAABw4pVUAAAAAXNSR0IArs4c6QAAAkRJREFUeF7tmDFOw0AUBdcSiIaKM3CKHIQ7UHEISq5AiUTFHYC0XADoTRsJEZFEjhFIaYAim92fjGFS736/zOTZzjavl0d98oMh0CgE4+IriEJYPhQC86EQhdAIwPL4DFEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg2BCfkAIqwAA94KZ/EAAAAASUVORK5CYII="
+        payload = {
+            "name": name,
+            "email": email,
+            "password": password,
+            "profile_image_url": profile_image_url
+        }
+        # Add more detailed browser-like headers
+        try:
+            # First try with fingerprint from LitAgent
+            headers = {
+                **LitAgent().generate_fingerprint(),
+                'origin': 'https://chat.xenai.tech',
+                'referer': 'https://chat.xenai.tech/auth',
+                'sec-ch-ua': '"Google Chrome";v="127", "Chromium";v="127", "Not=A?Brand";v="24"',
+                'sec-ch-ua-mobile': '?0',
+                'sec-ch-ua-platform': '"Windows"',
+                'sec-fetch-dest': 'empty',
+                'sec-fetch-mode': 'cors',
+                'sec-fetch-site': 'same-origin',
+                'accept-language': 'en-US,en;q=0.9'
+            }
+        except:
+            # Fallback to basic Chrome user agent if LitAgent fails
+            headers = {
+                'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
+                'origin': 'https://chat.xenai.tech',
+                'referer': 'https://chat.xenai.tech/auth',
+                'sec-ch-ua': '"Google Chrome";v="127", "Chromium";v="127", "Not=A?Brand";v="24"',
+                'sec-ch-ua-mobile': '?0',
+                'sec-ch-ua-platform': '"Windows"',
+                'sec-fetch-dest': 'empty',
+                'sec-fetch-mode': 'cors',
+                'sec-fetch-site': 'same-origin',
+                'accept-language': 'en-US,en;q=0.9'
+            }
+        try:
+            # Try signup with newer Chrome version
+            resp = session.post(
+                "https://chat.xenai.tech/api/v1/auths/signup",
+                headers=headers,
+                json=payload,
+                timeout=30,
+                impersonate="chrome120",  # Try newer Chrome version
+                verify=False
+            )
+
+            if resp.ok:
+                data = resp.json()
+                token = data.get("token")
+                if token:
+                    return token
+                # fallback: try to get from set-cookie
+                set_cookie = resp.headers.get("set-cookie", "")
+                if "token=" in set_cookie:
+                    return set_cookie.split("token=")[1].split(";")[0]
+
+            # If signup fails, try login (account might already exist)
+            login_resp = session.post(
+                "https://chat.xenai.tech/api/v1/auths/login",
+                headers=headers,
+                json={"email": email, "password": password},
+                timeout=30,
+                impersonate="chrome120",
+                verify=False
+            )
+
+            if login_resp.ok:
+                data = login_resp.json()
+                token = data.get("token")
+                if token:
+                    return token
+
+            # Try guest authentication as last resort
+            guest_resp = session.post(
+                "https://chat.xenai.tech/api/v1/auths/guest",
+                headers=headers,
+                json={},
+                timeout=30,
+                impersonate="chrome120",
+                verify=False
+            )
+
+            if guest_resp.ok:
+                data = guest_resp.json()
+                token = data.get("token")
+                if token:
+                    return token
+
+            raise RuntimeError(f"Failed to auto-fetch token: {resp.status_code} {resp.text}")
+        except Exception as e:
+            raise RuntimeError(f"Token auto-fetch failed: {e}")
+
+    def __init__(
+        self,
+        timeout: int = 60,
+    ):
+        """
+        Initializes the XenAI OpenAI-compatible client.
+
+        Args:
+            timeout: Request timeout in seconds.
+        """
+        self.api_endpoint = "https://chat.xenai.tech/api/chat/completions"
+        self.timeout = timeout
+        self.token = self._auto_fetch_token()
+        self.session = Session()  # Use curl_cffi Session
+
+        # Enhanced headers with browser impersonation
+        try:
+            self.headers = {
+                **LitAgent().generate_fingerprint(),
+                'origin': 'https://chat.xenai.tech',
+                'referer': 'https://chat.xenai.tech/',
+                'sec-ch-ua': '"Google Chrome";v="127", "Chromium";v="127", "Not=A?Brand";v="24"',
+                'sec-ch-ua-mobile': '?0',
+                'sec-ch-ua-platform': '"Windows"',
+                'sec-fetch-dest': 'empty',
+                'sec-fetch-mode': 'cors',
+                'sec-fetch-site': 'same-origin',
+                'accept-language': 'en-US,en;q=0.9'
+            }
+        except:
+            self.headers = {
+                'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
+                'origin': 'https://chat.xenai.tech',
+                'referer': 'https://chat.xenai.tech/',
+                'sec-ch-ua': '"Google Chrome";v="127", "Chromium";v="127", "Not=A?Brand";v="24"',
+                'sec-ch-ua-mobile': '?0',
+                'sec-ch-ua-platform': '"Windows"',
+                'sec-fetch-dest': 'empty',
+                'sec-fetch-mode': 'cors',
+                'sec-fetch-site': 'same-origin',
+                'accept-language': 'en-US,en;q=0.9'
+            }
+
+        # Update headers and set authorization token
+        self.headers['authorization'] = f'Bearer {self.token}'
+        self.session.headers.update(self.headers)
+
+        # Configure session
+        self.session.impersonate = "chrome120"
+        self.session.verify = False
+
+        self.chat = Chat(self)  # Initialize chat interface
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
+
+if __name__ == "__main__":
+    print("-" * 100)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 100)
+
+    test_prompt = "Say 'Hello' in one word"
+
+    client = XenAI()
+    for model in client.models.list():
+        print(f"\rTesting {model}...", end="")
+        try:
+            presp = client.chat.completions.create(
+                model=model,
+                messages=[{"role": "user", "content": test_prompt}]
+            )
+            # Try to get the response text (truncate to 100 chars)
+            if hasattr(presp, "choices") and presp.choices and hasattr(presp.choices[0], "message"):
+                content = presp.choices[0].message.content or ""
+                clean_text = content.strip().encode('utf-8', errors='ignore').decode('utf-8')
+                display_text = clean_text[:100] + "..." if len(clean_text) > 100 else clean_text
+                status = "✓" if clean_text else "✗"
+                if not clean_text:
+                    display_text = "Empty or invalid response"
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            error_msg = str(e)
+            if len(error_msg) > 100:
+                error_msg = error_msg[:97] + "..."
+            print(f"\r{model:<50} {'✗':<10} Error: {error_msg}")
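
For orientation, here is a minimal streaming sketch against the provider above. It is an illustration inferred from the diff, not documentation shipped with the package: the import path follows the file's location, the model name is one entry from AVAILABLE_MODELS, and the chunk handling mirrors what _create_stream yields.

    # Minimal sketch, assuming webscout 8.3.2 is installed and
    # chat.xenai.tech is reachable. Note XenAI() registers a throwaway
    # account in __init__ via _auto_fetch_token().
    from webscout.Provider.OPENAI.xenai import XenAI

    client = XenAI(timeout=60)

    stream = client.chat.completions.create(
        model="gpt-4o-mini",  # any entry from XenAI.AVAILABLE_MODELS
        messages=[{"role": "user", "content": "Say 'Hello' in one word"}],
        stream=True,
    )
    for chunk in stream:
        # Deltas carry incremental content; the final chunk has
        # content=None and finish_reason="stop".
        delta = chunk.choices[0].delta
        if delta.content:
            print(delta.content, end="", flush=True)

Note that, as written, the provider disables TLS verification (verify=False) and creates a new account on every instantiation.

The next two hunks are from webscout/Provider/OPENAI/yep.py (the class shown is YEPCHAT, and the counts match its +8 -2 entry above): relative imports become absolute, and six spoofed client-IP headers are added to the session.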
@@ -5,8 +5,8 @@ import json
 from typing import List, Dict, Optional, Union, Generator, Any
 
 # Import base classes and utility structures
-from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
-from .utils import (
+from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from webscout.Provider.OPENAI.utils import (
     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
     ChatCompletionMessage, CompletionUsage, get_system_prompt, count_tokens # Import count_tokens
 )
@@ -315,6 +315,12 @@ class YEPCHAT(OpenAICompatibleProvider):
             "Sec-CH-UA-Mobile": "?0",
             "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
             "User-Agent": fingerprint["user_agent"],
+            "x-forwarded-for": fingerprint["x-forwarded-for"],
+            "x-real-ip": fingerprint["x-real-ip"],
+            "x-client-ip": fingerprint["x-client-ip"],
+            "forwarded": fingerprint["forwarded"],
+            "x-forwarded-proto": "https",
+            "x-request-id": fingerprint["x-request-id"],
         }
         self.session.headers.update(self.headers)
 
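All six new headers are read straight from the LitAgent fingerprint, so the change assumes generate_fingerprint() supplies those keys. A hypothetical sketch of the implied dict shape (only the keys are grounded in this diff; the values are illustrative):

    # Keys the new yep.py headers require from LitAgent().generate_fingerprint().
    # Values are made up for illustration; real ones are generated per session.
    fingerprint = {
        "platform": "Windows",
        "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) ...",
        "x-forwarded-for": "203.0.113.7",  # illustrative documentation-range IP
        "x-real-ip": "203.0.113.7",
        "x-client-ip": "203.0.113.7",
        "forwarded": "for=203.0.113.7;proto=https",
        "x-request-id": "0f8fad5b-d9cb-469f-a165-70867728950e",
    }

The final hunk matches the +1 entry for webscout/Provider/TTI/__init__.py: it re-exports the new Bing text-to-image module (webscout/Provider/TTI/bing.py, also added in this release):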
@@ -7,3 +7,4 @@ from .aiarta import *
 from .gpt1image import *
 from .imagen import *
 from .together import *
+from .bing import *
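
After this change, a wildcard import of webscout.Provider.TTI also pulls in whatever names bing.py exports; the new 231-line module itself is not shown in the hunks above.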