webscout 8.3.1 → 8.3.3 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.
Files changed (114)
  1. webscout/AIutel.py +180 -78
  2. webscout/Bing_search.py +417 -0
  3. webscout/Extra/gguf.py +706 -177
  4. webscout/Provider/AISEARCH/__init__.py +1 -0
  5. webscout/Provider/AISEARCH/genspark_search.py +7 -7
  6. webscout/Provider/AISEARCH/stellar_search.py +132 -0
  7. webscout/Provider/ExaChat.py +84 -58
  8. webscout/Provider/GeminiProxy.py +140 -0
  9. webscout/Provider/HeckAI.py +85 -80
  10. webscout/Provider/Jadve.py +56 -50
  11. webscout/Provider/MCPCore.py +78 -75
  12. webscout/Provider/MiniMax.py +207 -0
  13. webscout/Provider/Nemotron.py +41 -13
  14. webscout/Provider/Netwrck.py +34 -51
  15. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -4
  16. webscout/Provider/OPENAI/GeminiProxy.py +328 -0
  17. webscout/Provider/OPENAI/MiniMax.py +298 -0
  18. webscout/Provider/OPENAI/README.md +32 -29
  19. webscout/Provider/OPENAI/README_AUTOPROXY.md +238 -0
  20. webscout/Provider/OPENAI/TogetherAI.py +4 -17
  21. webscout/Provider/OPENAI/__init__.py +17 -1
  22. webscout/Provider/OPENAI/autoproxy.py +1067 -39
  23. webscout/Provider/OPENAI/base.py +17 -76
  24. webscout/Provider/OPENAI/deepinfra.py +42 -108
  25. webscout/Provider/OPENAI/e2b.py +0 -1
  26. webscout/Provider/OPENAI/flowith.py +179 -166
  27. webscout/Provider/OPENAI/friendli.py +233 -0
  28. webscout/Provider/OPENAI/mcpcore.py +109 -70
  29. webscout/Provider/OPENAI/monochat.py +329 -0
  30. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  31. webscout/Provider/OPENAI/scirachat.py +59 -51
  32. webscout/Provider/OPENAI/toolbaz.py +3 -9
  33. webscout/Provider/OPENAI/typegpt.py +1 -1
  34. webscout/Provider/OPENAI/utils.py +19 -42
  35. webscout/Provider/OPENAI/x0gpt.py +14 -2
  36. webscout/Provider/OPENAI/xenai.py +514 -0
  37. webscout/Provider/OPENAI/yep.py +8 -2
  38. webscout/Provider/OpenGPT.py +54 -32
  39. webscout/Provider/PI.py +58 -84
  40. webscout/Provider/StandardInput.py +32 -13
  41. webscout/Provider/TTI/README.md +9 -9
  42. webscout/Provider/TTI/__init__.py +3 -1
  43. webscout/Provider/TTI/aiarta.py +92 -78
  44. webscout/Provider/TTI/bing.py +231 -0
  45. webscout/Provider/TTI/infip.py +212 -0
  46. webscout/Provider/TTI/monochat.py +220 -0
  47. webscout/Provider/TTS/speechma.py +45 -39
  48. webscout/Provider/TeachAnything.py +11 -3
  49. webscout/Provider/TextPollinationsAI.py +78 -70
  50. webscout/Provider/TogetherAI.py +350 -0
  51. webscout/Provider/Venice.py +37 -46
  52. webscout/Provider/VercelAI.py +27 -24
  53. webscout/Provider/WiseCat.py +35 -35
  54. webscout/Provider/WrDoChat.py +22 -26
  55. webscout/Provider/WritingMate.py +26 -22
  56. webscout/Provider/XenAI.py +324 -0
  57. webscout/Provider/__init__.py +10 -5
  58. webscout/Provider/deepseek_assistant.py +378 -0
  59. webscout/Provider/granite.py +48 -57
  60. webscout/Provider/koala.py +51 -39
  61. webscout/Provider/learnfastai.py +49 -64
  62. webscout/Provider/llmchat.py +79 -93
  63. webscout/Provider/llmchatco.py +63 -78
  64. webscout/Provider/multichat.py +51 -40
  65. webscout/Provider/oivscode.py +1 -1
  66. webscout/Provider/scira_chat.py +159 -96
  67. webscout/Provider/scnet.py +13 -13
  68. webscout/Provider/searchchat.py +13 -13
  69. webscout/Provider/sonus.py +12 -11
  70. webscout/Provider/toolbaz.py +25 -8
  71. webscout/Provider/turboseek.py +41 -42
  72. webscout/Provider/typefully.py +27 -12
  73. webscout/Provider/typegpt.py +41 -46
  74. webscout/Provider/uncovr.py +55 -90
  75. webscout/Provider/x0gpt.py +33 -17
  76. webscout/Provider/yep.py +79 -96
  77. webscout/auth/__init__.py +55 -0
  78. webscout/auth/api_key_manager.py +189 -0
  79. webscout/auth/auth_system.py +100 -0
  80. webscout/auth/config.py +76 -0
  81. webscout/auth/database.py +400 -0
  82. webscout/auth/exceptions.py +67 -0
  83. webscout/auth/middleware.py +248 -0
  84. webscout/auth/models.py +130 -0
  85. webscout/auth/providers.py +279 -0
  86. webscout/auth/rate_limiter.py +254 -0
  87. webscout/auth/request_models.py +127 -0
  88. webscout/auth/request_processing.py +226 -0
  89. webscout/auth/routes.py +550 -0
  90. webscout/auth/schemas.py +103 -0
  91. webscout/auth/server.py +367 -0
  92. webscout/client.py +121 -70
  93. webscout/litagent/Readme.md +68 -55
  94. webscout/litagent/agent.py +99 -9
  95. webscout/scout/core/scout.py +104 -26
  96. webscout/scout/element.py +139 -18
  97. webscout/swiftcli/core/cli.py +14 -3
  98. webscout/swiftcli/decorators/output.py +59 -9
  99. webscout/update_checker.py +31 -49
  100. webscout/version.py +1 -1
  101. webscout/webscout_search.py +4 -12
  102. webscout/webscout_search_async.py +3 -10
  103. webscout/yep_search.py +2 -11
  104. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/METADATA +141 -99
  105. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/RECORD +109 -83
  106. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/entry_points.txt +1 -1
  107. webscout/Provider/HF_space/__init__.py +0 -0
  108. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  109. webscout/Provider/OPENAI/api.py +0 -1320
  110. webscout/Provider/TTI/fastflux.py +0 -233
  111. webscout/Provider/Writecream.py +0 -246
  112. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/WHEEL +0 -0
  113. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/licenses/LICENSE.md +0 -0
  114. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/xenai.py
@@ -0,0 +1,514 @@
+ import time
+ import uuid
+ import json
+ import random
+ import string
+ from typing import List, Dict, Optional, Union, Generator, Any
+ import warnings
+
+ # Use curl_cffi for requests
+ from curl_cffi.requests import Session
+ from curl_cffi import CurlError
+ import urllib3
+
+ # Import base classes and utility structures
+ from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+ from webscout.Provider.OPENAI.utils import (
+     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+     ChatCompletionMessage, CompletionUsage
+ )
+ warnings.filterwarnings("ignore", category=urllib3.exceptions.InsecureRequestWarning)
+ # Import LitAgent for user agent generation
+ try:
+     from webscout.litagent import LitAgent
+ except ImportError:
+     # Define a dummy LitAgent if webscout is not installed or accessible
+     class LitAgent:
+         def random(self) -> str:
+             # Return a default user agent if LitAgent is unavailable
+             print("Warning: LitAgent not found. Using default minimal headers.")
+             return "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
+
+ # ANSI escape codes for formatting
+ BOLD = "\033[1m"
+ RED = "\033[91m"
+ RESET = "\033[0m"
+
+ class Completions(BaseCompletions):
+     def __init__(self, client: 'XenAI'):
+         self._client = client
+
+     def create(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int] = None,
+         stream: bool = False,
+         temperature: Optional[float] = None,
+         top_p: Optional[float] = None,
+         timeout: Optional[int] = None,
+         proxies: Optional[Dict[str, str]] = None,
+         **kwargs: Any
+     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+         """
+         Creates a model response for the given chat conversation using XenAI API.
+         Mimics openai.chat.completions.create
+         """
+         if model not in self._client.AVAILABLE_MODELS:
+             raise ValueError(f"Model '{model}' not supported by XenAI. Available: {self._client.AVAILABLE_MODELS}")
+
+         # Construct the XenAI-specific payload
+         payload = {
+             "stream": stream,
+             "model": model,
+             "messages": messages,
+             "params": kwargs.get("params", {}),
+             "tool_servers": kwargs.get("tool_servers", []),
+             "features": kwargs.get("features", {"web_search": False}),
+             "chat_id": kwargs.get("chat_id", str(uuid.uuid4())),
+             "id": str(uuid.uuid4()), # Message ID
+             "stream_options": kwargs.get("stream_options", {"include_usage": True})
+         }
+
+         # Add optional OpenAI params to XenAI's 'params' field if provided
+         if temperature is not None: payload["params"]["temperature"] = temperature
+         if top_p is not None: payload["params"]["top_p"] = top_p
+         if max_tokens is not None: payload["params"]["max_tokens"] = max_tokens
+
+         # Generate standard OpenAI-compatible IDs
+         request_id = f"chatcmpl-{uuid.uuid4()}"
+         created_time = int(time.time())
+
+         if stream:
+             return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
+         else:
+             return self._create_non_stream_from_stream(request_id, created_time, model, payload, timeout, proxies)
+
+     def _create_stream(
+         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+     ) -> Generator[ChatCompletionChunk, None, None]:
+         """Handles the streaming response from XenAI."""
+         final_usage_data = None # To store usage if received
+         try:
+             response = self._client.session.post(
+                 self._client.api_endpoint,
+                 headers=self._client.headers,
+                 json=payload,
+                 stream=True,
+                 timeout=timeout or self._client.timeout,
+                 proxies=proxies or getattr(self._client, "proxies", None),
+                 impersonate="chrome110",
+                 verify=False
+             )
+
+             if not response.ok:
+                 try:
+                     error_text = response.text
+                 except Exception:
+                     error_text = "<Failed to read error response>"
+                 raise IOError(
+                     f"XenAI API Error: {response.status_code} {response.reason} - {error_text}"
+                 )
+
+             for line_bytes in response.iter_lines():
+                 if line_bytes:
+                     try:
+                         line = line_bytes.decode('utf-8').strip()
+                         if line.startswith("data: "):
+                             json_str = line[6:]
+                             if json_str == "[DONE]":
+                                 break # End of stream signal
+
+                             json_data = json.loads(json_str)
+
+                             # Check for usage data in the chunk (often comes near the end)
+                             if 'usage' in json_data and json_data['usage']:
+                                 final_usage_data = json_data['usage']
+                                 # Don't yield a chunk just for usage, wait for content or final chunk
+
+                             if 'choices' in json_data and len(json_data['choices']) > 0:
+                                 choice_data = json_data['choices'][0]
+                                 delta_data = choice_data.get('delta', {})
+                                 finish_reason = choice_data.get('finish_reason')
+                                 content = delta_data.get('content')
+                                 role = delta_data.get('role', 'assistant') # Default role
+
+                                 # Only yield chunks with content or finish reason
+                                 if content is not None or finish_reason:
+                                     delta = ChoiceDelta(content=content, role=role)
+                                     choice = Choice(index=0, delta=delta, finish_reason=finish_reason)
+                                     chunk = ChatCompletionChunk(
+                                         id=request_id,
+                                         choices=[choice],
+                                         created=created_time,
+                                         model=model,
+                                         system_fingerprint=json_data.get('system_fingerprint')
+                                     )
+                                     yield chunk
+
+                     except (json.JSONDecodeError, UnicodeDecodeError):
+                         print(f"{RED}Warning: Could not decode JSON line: {line}{RESET}")
+                         continue
+                     except Exception as e:
+                         print(f"{RED}Error processing stream line: {e} - Line: {line}{RESET}")
+                         continue
+
+             # Final chunk to ensure stream termination is signaled correctly
+             # (even if [DONE] was received, this confirms the generator end)
+             final_delta = ChoiceDelta()
+             # Include usage in the final chunk if available
+             usage_obj = None
+             if final_usage_data:
+                 usage_obj = CompletionUsage(
+                     prompt_tokens=final_usage_data.get('prompt_tokens', 0),
+                     completion_tokens=final_usage_data.get('completion_tokens', 0),
+                     total_tokens=final_usage_data.get('total_tokens', 0),
+                 )
+
+             final_choice = Choice(index=0, delta=final_delta, finish_reason="stop")
+             final_chunk = ChatCompletionChunk(
+                 id=request_id,
+                 choices=[final_choice],
+                 created=created_time,
+                 model=model,
+                 # system_fingerprint=..., # Can be added if available in final event
+             )
+             # Add usage to the final chunk dictionary representation if available
+             if hasattr(final_chunk, "model_dump"):
+                 final_chunk_dict = final_chunk.model_dump(exclude_none=True)
+             else:
+                 final_chunk_dict = final_chunk.dict(exclude_none=True)
+             if usage_obj:
+                 if hasattr(usage_obj, "model_dump"):
+                     final_chunk_dict["usage"] = usage_obj.model_dump(exclude_none=True)
+                 else:
+                     final_chunk_dict["usage"] = usage_obj.dict(exclude_none=True)
+
+             # Yield the final dictionary or object as needed by downstream consumers
+             # Yielding the object aligns better with the generator type hint
+             yield final_chunk
+
+         except CurlError as e:
+             print(f"{RED}CurlError during XenAI stream request: {e}{RESET}")
+             raise IOError(f"XenAI request failed due to network/curl issue: {e}") from e
+         except Exception as e:
+             print(f"{RED}Unexpected error during XenAI stream: {e}{RESET}")
+             error_details = ""
+             if hasattr(e, 'response') and e.response is not None:
+                 error_details = f" - Status: {e.response.status_code}, Response: {e.response.text}"
+             raise IOError(f"XenAI stream processing failed: {e}{error_details}") from e
+
+     def _create_non_stream_from_stream(
+         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+     ) -> ChatCompletion:
+         """Handles the non-streaming response by making a single POST request (like deepinfra)."""
+         try:
+             # Ensure stream is False for non-streaming
+             payload = dict(payload)
+             payload["stream"] = False
+
+             response = self._client.session.post(
+                 self._client.api_endpoint,
+                 headers=self._client.headers,
+                 json=payload,
+                 timeout=timeout or self._client.timeout,
+                 proxies=proxies or getattr(self._client, "proxies", None),
+                 impersonate="chrome110",
+                 verify=False
+             )
+             if not response.ok:
+                 try:
+                     error_text = response.text
+                 except Exception:
+                     error_text = "<Failed to read error response>"
+                 raise IOError(
+                     f"XenAI API Error: {response.status_code} {response.reason} - {error_text}"
+                 )
+
+             data = response.json()
+             choices_data = data.get('choices', [])
+             usage_data = data.get('usage', {})
+
+             choices = []
+             for choice_d in choices_data:
+                 message_d = choice_d.get('message', {})
+                 message = ChatCompletionMessage(
+                     role=message_d.get('role', 'assistant'),
+                     content=message_d.get('content', '')
+                 )
+                 choice = Choice(
+                     index=choice_d.get('index', 0),
+                     message=message,
+                     finish_reason=choice_d.get('finish_reason', 'stop')
+                 )
+                 choices.append(choice)
+
+             usage = CompletionUsage(
+                 prompt_tokens=usage_data.get('prompt_tokens', 0),
+                 completion_tokens=usage_data.get('completion_tokens', 0),
+                 total_tokens=usage_data.get('total_tokens', 0)
+             )
+
+             completion = ChatCompletion(
+                 id=request_id,
+                 choices=choices,
+                 created=created_time,
+                 model=data.get('model', model),
+                 usage=usage,
+             )
+             return completion
+
+         except CurlError as e:
+             print(f"{RED}CurlError during XenAI non-stream request: {e}{RESET}")
+             raise IOError(f"XenAI request failed due to network/curl issue: {e}") from e
+         except Exception as e:
+             print(f"{RED}Unexpected error during XenAI non-stream: {e}{RESET}")
+             error_details = ""
+             if hasattr(e, 'response') and e.response is not None:
+                 error_details = f" - Status: {e.response.status_code}, Response: {e.response.text}"
+             raise IOError(f"XenAI non-stream processing failed: {e}{error_details}") from e
+
+
+ class Chat(BaseChat):
+     def __init__(self, client: 'XenAI'):
+         self.completions = Completions(client)
+
+
+ class XenAI(OpenAICompatibleProvider):
+     """
+     OpenAI-compatible client for the XenAI API (chat.XenAI.xyz).
+
+     Usage:
+         client = XenAI()
+         response = client.chat.completions.create(
+             model="google/gemma-7b-it",
+             messages=[{"role": "user", "content": "Hello!"}]
+         )
+         print(response.choices[0].message.content)
+     """
+
+     AVAILABLE_MODELS = [
+         "gemini-2.5-pro-preview-05-06",
+         "gemini-2.5-flash-preview-05-20",
+         "o4-mini-high",
+         "grok-3-mini-fast-beta",
+         "grok-3-fast-beta",
+         "gpt-4.1",
+         "o3-high",
+         "gpt-4o-search-preview",
+         "gpt-4o",
+         "claude-sonnet-4-20250514",
+         "claude-sonnet-4-20250514-thinking",
+         "deepseek-ai/DeepSeek-V3-0324",
+         "deepseek-ai/DeepSeek-R1-0528",
+         "groq/deepseek-r1-distill-llama-70b",
+         "deepseek-ai/DeepSeek-Prover-V2-671B",
+         "meta-llama/llama-4-maverick-17b-128e-instruct",
+         "meta-llama/llama-4-scout-17b-16e-instruct",
+         "cognitivecomputations/Dolphin3.0-Mistral-24B",
+         "sonar-pro",
+         "gpt-4o-mini",
+         "gemini-2.0-flash-lite-preview-02-05",
+         "claude-3-7-sonnet-20250219",
+         "claude-3-7-sonnet-20250219-thinking",
+         "claude-opus-4-20250514",
+         "claude-opus-4-20250514-thinking",
+         "chutesai/Llama-4-Maverick-17B-128E-Instruct-FP8",
+         "chutesai/Llama-4-Scout-17B-16E-Instruct",
+     ]
+
+     def _auto_fetch_token(self):
+         """Automatically fetch a token from the signup endpoint."""
+         session = Session()
+         def random_string(length=8):
+             return ''.join(random.choices(string.ascii_lowercase, k=length))
+         name = random_string(6)
+         email = f"{random_string(8)}@gmail.com"
+         password = email
+         profile_image_url = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAYAAABw4pVUAAAAAXNSR0IArs4c6QAAAkRJREFUeF7tmDFOw0AUBdcSiIaKM3CKHIQ7UHEISq5AiUTFHYC0XADoTRsJEZFEjhFIaYAim92fjGFS736/zOTZzjavl0d98oMh0CgE4+IriEJYPhQC86EQhdAIwPL4DFEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg2BCfkAIqwAA94KZ/EAAAAASUVORK5CYII="
+         payload = {
+             "name": name,
+             "email": email,
+             "password": password,
+             "profile_image_url": profile_image_url
+         }
+         # Add more detailed browser-like headers
+         try:
+             # First try with fingerprint from LitAgent
+             headers = {
+                 **LitAgent().generate_fingerprint(),
+                 'origin': 'https://chat.xenai.tech',
+                 'referer': 'https://chat.xenai.tech/auth',
+                 'sec-ch-ua': '"Google Chrome";v="127", "Chromium";v="127", "Not=A?Brand";v="24"',
+                 'sec-ch-ua-mobile': '?0',
+                 'sec-ch-ua-platform': '"Windows"',
+                 'sec-fetch-dest': 'empty',
+                 'sec-fetch-mode': 'cors',
+                 'sec-fetch-site': 'same-origin',
+                 'accept-language': 'en-US,en;q=0.9'
+             }
+         except:
+             # Fallback to basic Chrome user agent if LitAgent fails
+             headers = {
+                 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
+                 'origin': 'https://chat.xenai.tech',
+                 'referer': 'https://chat.xenai.tech/auth',
+                 'sec-ch-ua': '"Google Chrome";v="127", "Chromium";v="127", "Not=A?Brand";v="24"',
+                 'sec-ch-ua-mobile': '?0',
+                 'sec-ch-ua-platform': '"Windows"',
+                 'sec-fetch-dest': 'empty',
+                 'sec-fetch-mode': 'cors',
+                 'sec-fetch-site': 'same-origin',
+                 'accept-language': 'en-US,en;q=0.9'
+             }
+         try:
+             # Try signup with newer Chrome version
+             resp = session.post(
+                 "https://chat.xenai.tech/api/v1/auths/signup",
+                 headers=headers,
+                 json=payload,
+                 timeout=30,
+                 impersonate="chrome120", # Try newer Chrome version
+                 verify=False
+             )
+
+             if resp.ok:
+                 data = resp.json()
+                 token = data.get("token")
+                 if token:
+                     return token
+                 # fallback: try to get from set-cookie
+                 set_cookie = resp.headers.get("set-cookie", "")
+                 if "token=" in set_cookie:
+                     return set_cookie.split("token=")[1].split(";")[0]
+
+             # If signup fails, try login (account might already exist)
+             login_resp = session.post(
+                 "https://chat.xenai.tech/api/v1/auths/login",
+                 headers=headers,
+                 json={"email": email, "password": password},
+                 timeout=30,
+                 impersonate="chrome120",
+                 verify=False
+             )
+
+             if login_resp.ok:
+                 data = login_resp.json()
+                 token = data.get("token")
+                 if token:
+                     return token
+
+             # Try guest authentication as last resort
+             guest_resp = session.post(
+                 "https://chat.xenai.tech/api/v1/auths/guest",
+                 headers=headers,
+                 json={},
+                 timeout=30,
+                 impersonate="chrome120",
+                 verify=False
+             )
+
+             if guest_resp.ok:
+                 data = guest_resp.json()
+                 token = data.get("token")
+                 if token:
+                     return token
+
+             raise RuntimeError(f"Failed to auto-fetch token: {resp.status_code} {resp.text}")
+         except Exception as e:
+             raise RuntimeError(f"Token auto-fetch failed: {e}")
+
+     def __init__(
+         self,
+         timeout: int = 60,
+     ):
+         """
+         Initializes the XenAI OpenAI-compatible client.
+
+         Args:
+             timeout: Request timeout in seconds.
+         """
+         self.api_endpoint = "https://chat.xenai.tech/api/chat/completions"
+         self.timeout = timeout
+         self.token = self._auto_fetch_token()
+         self.session = Session() # Use curl_cffi Session
+
+         # Enhanced headers with browser impersonation
+         try:
+             self.headers = {
+                 **LitAgent().generate_fingerprint(),
+                 'origin': 'https://chat.xenai.tech',
+                 'referer': 'https://chat.xenai.tech/',
+                 'sec-ch-ua': '"Google Chrome";v="127", "Chromium";v="127", "Not=A?Brand";v="24"',
+                 'sec-ch-ua-mobile': '?0',
+                 'sec-ch-ua-platform': '"Windows"',
+                 'sec-fetch-dest': 'empty',
+                 'sec-fetch-mode': 'cors',
+                 'sec-fetch-site': 'same-origin',
+                 'accept-language': 'en-US,en;q=0.9'
+             }
+         except:
+             self.headers = {
+                 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
+                 'origin': 'https://chat.xenai.tech',
+                 'referer': 'https://chat.xenai.tech/',
+                 'sec-ch-ua': '"Google Chrome";v="127", "Chromium";v="127", "Not=A?Brand";v="24"',
+                 'sec-ch-ua-mobile': '?0',
+                 'sec-ch-ua-platform': '"Windows"',
+                 'sec-fetch-dest': 'empty',
+                 'sec-fetch-mode': 'cors',
+                 'sec-fetch-site': 'same-origin',
+                 'accept-language': 'en-US,en;q=0.9'
+             }
+
+         # Update headers and set authorization token
+         self.headers['authorization'] = f'Bearer {self.token}'
+         self.session.headers.update(self.headers)
+
+         # Configure session
+         self.session.impersonate = "chrome120"
+         self.session.verify = False
+
+         self.chat = Chat(self) # Initialize chat interface
+
+     @property
+     def models(self):
+         class _ModelList:
+             def list(inner_self):
+                 return type(self).AVAILABLE_MODELS
+         return _ModelList()
+
+ if __name__ == "__main__":
+     print("-" * 100)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 100)
+
+     test_prompt = "Say 'Hello' in one word"
+
+     client = XenAI()
+     for model in client.models.list():
+         print(f"\rTesting {model}...", end="")
+         try:
+             presp = client.chat.completions.create(
+                 model=model,
+                 messages=[{"role": "user", "content": test_prompt}]
+             )
+             # Try to get the response text (truncate to 100 chars)
+             if hasattr(presp, "choices") and presp.choices and hasattr(presp.choices[0], "message"):
+                 content = presp.choices[0].message.content or ""
+                 clean_text = content.strip().encode('utf-8', errors='ignore').decode('utf-8')
+                 display_text = clean_text[:100] + "..." if len(clean_text) > 100 else clean_text
+                 status = "✓" if clean_text else "✗"
+                 if not clean_text:
+                     display_text = "Empty or invalid response"
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"\r{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             error_msg = str(e)
+             if len(error_msg) > 100:
+                 error_msg = error_msg[:97] + "..."
+             print(f"\r{model:<50} {'✗':<10} Error: {error_msg}")
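For orientation, a minimal usage sketch of the new provider. The module path webscout.Provider.OPENAI.xenai is assumed from the file list above; the call shapes mirror the class docstring and the __main__ test harness in the hunk, and the model name is one entry from AVAILABLE_MODELS.

    from webscout.Provider.OPENAI.xenai import XenAI  # path assumed from the file list

    client = XenAI()  # fetches a throwaway auth token during construction

    # Non-streaming: returns a ChatCompletion object
    response = client.chat.completions.create(
        model="gpt-4o-mini",  # any entry from XenAI.AVAILABLE_MODELS
        messages=[{"role": "user", "content": "Say 'Hello' in one word"}],
    )
    print(response.choices[0].message.content)

    # Streaming: yields ChatCompletionChunk objects; the final chunk has finish_reason="stop"
    for chunk in client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Count to five"}],
        stream=True,
    ):
        if chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="", flush=True)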
webscout/Provider/OPENAI/yep.py
@@ -5,8 +5,8 @@ import json
  from typing import List, Dict, Optional, Union, Generator, Any

  # Import base classes and utility structures
- from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
- from .utils import (
+ from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+ from webscout.Provider.OPENAI.utils import (
      ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
      ChatCompletionMessage, CompletionUsage, get_system_prompt, count_tokens # Import count_tokens
  )
webscout/Provider/OPENAI/yep.py
@@ -315,6 +315,12 @@ class YEPCHAT(OpenAICompatibleProvider):
              "Sec-CH-UA-Mobile": "?0",
              "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
              "User-Agent": fingerprint["user_agent"],
+             "x-forwarded-for": fingerprint["x-forwarded-for"],
+             "x-real-ip": fingerprint["x-real-ip"],
+             "x-client-ip": fingerprint["x-client-ip"],
+             "forwarded": fingerprint["forwarded"],
+             "x-forwarded-proto": "https",
+             "x-request-id": fingerprint["x-request-id"],
          }
          self.session.headers.update(self.headers)

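This hunk extends YEPCHAT's request headers with spoofed client-IP fields read from LitAgent's fingerprint. A standalone sketch of the pattern; the fake_fingerprint helper below is hypothetical, since in webscout these keys come from LitAgent().generate_fingerprint():

    import uuid
    import random

    def fake_fingerprint() -> dict:
        # Hypothetical stand-in for LitAgent().generate_fingerprint();
        # only the keys this hunk reads are modeled here.
        ip = ".".join(str(random.randint(1, 254)) for _ in range(4))
        return {
            "x-forwarded-for": ip,
            "x-real-ip": ip,
            "x-client-ip": ip,
            "forwarded": f"for={ip};proto=https",
            "x-request-id": str(uuid.uuid4()),
        }

    fingerprint = fake_fingerprint()
    headers = {
        "x-forwarded-for": fingerprint["x-forwarded-for"],
        "x-real-ip": fingerprint["x-real-ip"],
        "x-client-ip": fingerprint["x-client-ip"],
        "forwarded": fingerprint["forwarded"],
        "x-forwarded-proto": "https",
        "x-request-id": fingerprint["x-request-id"],
    }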
webscout/Provider/OpenGPT.py
@@ -87,7 +87,7 @@ class OpenGPT(Provider):
      def ask(
          self,
          prompt: str,
-         stream: bool = False, # Note: API does not support streaming
+         stream: bool = False, # Note: API does not support streaming natively
          raw: bool = False,
          optimizer: str = None,
          conversationally: bool = False,
webscout/Provider/OpenGPT.py
@@ -121,38 +121,54 @@ class OpenGPT(Provider):
              "id": self.app_id,
              "userKey": "" # Assuming userKey is meant to be empty as in the original code
          }
-
-         # API does not stream, implement non-stream logic directly
+
+         def for_stream():
+             try:
+                 response = self.session.post(
+                     "https://open-gpt.app/api/generate",
+                     data=json.dumps(payload),
+                     timeout=self.timeout,
+                     impersonate="chrome110"
+                 )
+                 response.raise_for_status()
+                 response_text = response.text
+                 buffer = ""
+                 chunk_size = 32
+                 for i in range(0, len(response_text), chunk_size):
+                     out = response_text[i:i+chunk_size]
+                     if out.strip():
+                         if raw:
+                             yield out
+                         else:
+                             yield {"text": out}
+                 self.last_response = {"text": response_text}
+                 self.conversation.update_chat_history(prompt, response_text)
+             except CurlError as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+             except Exception as e:
+                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                 raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}") from e
+
          def for_non_stream():
              try:
-                 # Use curl_cffi session post with impersonate
                  response = self.session.post(
                      "https://open-gpt.app/api/generate",
-                     # headers are set on the session
-                     data=json.dumps(payload), # Keep data as JSON string
+                     data=json.dumps(payload),
                      timeout=self.timeout,
-                     # proxies are set on the session
-                     impersonate="chrome110" # Use a common impersonation profile
+                     impersonate="chrome110"
                  )
-
-                 response.raise_for_status() # Check for HTTP errors
-
-                 # Use response.text which is already decoded
+                 response.raise_for_status()
                  response_text = response.text
                  self.last_response = {"text": response_text}
                  self.conversation.update_chat_history(prompt, response_text)
-
                  return {"raw": response_text} if raw else {"text": response_text}
-
-             except CurlError as e: # Catch CurlError
+             except CurlError as e:
                  raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
-             except Exception as e: # Catch other potential exceptions (like HTTPError, JSONDecodeError)
+             except Exception as e:
                  err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                  raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}") from e
-
-         # This provider doesn't support streaming, so just return non-stream
-         return for_non_stream()
+
+         return for_stream() if stream else for_non_stream()

      def chat(
          self,
webscout/Provider/OpenGPT.py
@@ -173,21 +189,25 @@ class OpenGPT(Provider):
          Returns:
              A string with the response text.
          """
-         # Since ask() now handles both stream=True/False by returning the full response dict:
-         response_data = self.ask(
-             prompt,
-             stream=False, # Call ask in non-stream mode internally
-             raw=False, # Ensure ask returns dict with 'text' key
-             optimizer=optimizer,
-             conversationally=conversationally
-         )
-         # If stream=True was requested, simulate streaming by yielding the full message at once
          if stream:
              def stream_wrapper():
-                 yield self.get_message(response_data) # yield only the text string
+                 for part in self.ask(
+                     prompt,
+                     stream=True,
+                     raw=False,
+                     optimizer=optimizer,
+                     conversationally=conversationally
+                 ):
+                     yield self.get_message(part) if isinstance(part, dict) else part
              return stream_wrapper()
          else:
-             # If stream=False, return the full message directly
+             response_data = self.ask(
+                 prompt,
+                 stream=False,
+                 raw=False,
+                 optimizer=optimizer,
+                 conversationally=conversationally
+             )
              return self.get_message(response_data)

      def get_message(self, response: dict) -> str:
webscout/Provider/OpenGPT.py
@@ -206,4 +226,6 @@ class OpenGPT(Provider):

  if __name__ == "__main__":
      ai = OpenGPT()
-     print(ai.chat("Hello, how are you?"))
+     response = ai.chat("write me about humans in points", stream=True)
+     for part in response:
+         print(part, end="", flush=True)
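Note that OpenGPT's new streaming mode is simulated: the API still returns the complete body in one HTTP response, and the for_stream helper above re-yields it in fixed 32-character slices. A standalone sketch of that pattern:

    def pseudo_stream(text: str, chunk_size: int = 32):
        # Re-yield an already-complete response in fixed-size slices,
        # mirroring the for_stream helper added in this diff.
        for i in range(0, len(text), chunk_size):
            out = text[i:i + chunk_size]
            if out.strip():  # skip slices that are pure whitespace
                yield out

    for piece in pseudo_stream("OpenGPT returns its whole reply in a single HTTP response."):
        print(piece, end="", flush=True)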