webscout-8.3-py3-none-any.whl → webscout-8.3.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of webscout might be problematic.

Files changed (120)
  1. webscout/AIauto.py +4 -4
  2. webscout/AIbase.py +61 -1
  3. webscout/AIutel.py +46 -53
  4. webscout/Bing_search.py +418 -0
  5. webscout/Extra/YTToolkit/ytapi/patterns.py +45 -45
  6. webscout/Extra/YTToolkit/ytapi/stream.py +1 -1
  7. webscout/Extra/YTToolkit/ytapi/video.py +10 -10
  8. webscout/Extra/autocoder/autocoder_utiles.py +1 -1
  9. webscout/Extra/gguf.py +706 -177
  10. webscout/Litlogger/formats.py +9 -0
  11. webscout/Litlogger/handlers.py +18 -0
  12. webscout/Litlogger/logger.py +43 -1
  13. webscout/Provider/AISEARCH/genspark_search.py +7 -7
  14. webscout/Provider/AISEARCH/scira_search.py +3 -2
  15. webscout/Provider/GeminiProxy.py +140 -0
  16. webscout/Provider/LambdaChat.py +7 -1
  17. webscout/Provider/MCPCore.py +78 -75
  18. webscout/Provider/OPENAI/BLACKBOXAI.py +1046 -1017
  19. webscout/Provider/OPENAI/GeminiProxy.py +328 -0
  20. webscout/Provider/OPENAI/Qwen3.py +303 -303
  21. webscout/Provider/OPENAI/README.md +5 -0
  22. webscout/Provider/OPENAI/README_AUTOPROXY.md +238 -0
  23. webscout/Provider/OPENAI/TogetherAI.py +355 -0
  24. webscout/Provider/OPENAI/__init__.py +16 -1
  25. webscout/Provider/OPENAI/autoproxy.py +332 -0
  26. webscout/Provider/OPENAI/base.py +101 -14
  27. webscout/Provider/OPENAI/chatgpt.py +15 -2
  28. webscout/Provider/OPENAI/chatgptclone.py +14 -3
  29. webscout/Provider/OPENAI/deepinfra.py +339 -328
  30. webscout/Provider/OPENAI/e2b.py +295 -74
  31. webscout/Provider/OPENAI/mcpcore.py +109 -70
  32. webscout/Provider/OPENAI/opkfc.py +18 -6
  33. webscout/Provider/OPENAI/scirachat.py +59 -50
  34. webscout/Provider/OPENAI/toolbaz.py +2 -10
  35. webscout/Provider/OPENAI/writecream.py +166 -166
  36. webscout/Provider/OPENAI/x0gpt.py +367 -367
  37. webscout/Provider/OPENAI/xenai.py +514 -0
  38. webscout/Provider/OPENAI/yep.py +389 -383
  39. webscout/Provider/STT/__init__.py +3 -0
  40. webscout/Provider/STT/base.py +281 -0
  41. webscout/Provider/STT/elevenlabs.py +265 -0
  42. webscout/Provider/TTI/__init__.py +4 -1
  43. webscout/Provider/TTI/aiarta.py +399 -365
  44. webscout/Provider/TTI/base.py +74 -2
  45. webscout/Provider/TTI/bing.py +231 -0
  46. webscout/Provider/TTI/fastflux.py +63 -30
  47. webscout/Provider/TTI/gpt1image.py +149 -0
  48. webscout/Provider/TTI/imagen.py +196 -0
  49. webscout/Provider/TTI/magicstudio.py +60 -29
  50. webscout/Provider/TTI/piclumen.py +43 -32
  51. webscout/Provider/TTI/pixelmuse.py +232 -225
  52. webscout/Provider/TTI/pollinations.py +43 -32
  53. webscout/Provider/TTI/together.py +287 -0
  54. webscout/Provider/TTI/utils.py +2 -1
  55. webscout/Provider/TTS/README.md +1 -0
  56. webscout/Provider/TTS/__init__.py +2 -1
  57. webscout/Provider/TTS/freetts.py +140 -0
  58. webscout/Provider/TTS/speechma.py +45 -39
  59. webscout/Provider/TogetherAI.py +366 -0
  60. webscout/Provider/UNFINISHED/ChutesAI.py +314 -0
  61. webscout/Provider/UNFINISHED/fetch_together_models.py +95 -0
  62. webscout/Provider/XenAI.py +324 -0
  63. webscout/Provider/__init__.py +8 -0
  64. webscout/Provider/deepseek_assistant.py +378 -0
  65. webscout/Provider/scira_chat.py +3 -2
  66. webscout/Provider/toolbaz.py +0 -1
  67. webscout/auth/__init__.py +44 -0
  68. webscout/auth/api_key_manager.py +189 -0
  69. webscout/auth/auth_system.py +100 -0
  70. webscout/auth/config.py +76 -0
  71. webscout/auth/database.py +400 -0
  72. webscout/auth/exceptions.py +67 -0
  73. webscout/auth/middleware.py +248 -0
  74. webscout/auth/models.py +130 -0
  75. webscout/auth/providers.py +257 -0
  76. webscout/auth/rate_limiter.py +254 -0
  77. webscout/auth/request_models.py +127 -0
  78. webscout/auth/request_processing.py +226 -0
  79. webscout/auth/routes.py +526 -0
  80. webscout/auth/schemas.py +103 -0
  81. webscout/auth/server.py +312 -0
  82. webscout/auth/static/favicon.svg +11 -0
  83. webscout/auth/swagger_ui.py +203 -0
  84. webscout/auth/templates/components/authentication.html +237 -0
  85. webscout/auth/templates/components/base.html +103 -0
  86. webscout/auth/templates/components/endpoints.html +750 -0
  87. webscout/auth/templates/components/examples.html +491 -0
  88. webscout/auth/templates/components/footer.html +75 -0
  89. webscout/auth/templates/components/header.html +27 -0
  90. webscout/auth/templates/components/models.html +286 -0
  91. webscout/auth/templates/components/navigation.html +70 -0
  92. webscout/auth/templates/static/api.js +455 -0
  93. webscout/auth/templates/static/icons.js +168 -0
  94. webscout/auth/templates/static/main.js +784 -0
  95. webscout/auth/templates/static/particles.js +201 -0
  96. webscout/auth/templates/static/styles.css +3353 -0
  97. webscout/auth/templates/static/ui.js +374 -0
  98. webscout/auth/templates/swagger_ui.html +170 -0
  99. webscout/client.py +49 -3
  100. webscout/litagent/Readme.md +12 -3
  101. webscout/litagent/agent.py +99 -62
  102. webscout/scout/core/scout.py +104 -26
  103. webscout/scout/element.py +139 -18
  104. webscout/swiftcli/core/cli.py +14 -3
  105. webscout/swiftcli/decorators/output.py +59 -9
  106. webscout/update_checker.py +31 -49
  107. webscout/version.py +1 -1
  108. webscout/webscout_search.py +4 -12
  109. webscout/webscout_search_async.py +3 -10
  110. webscout/yep_search.py +2 -11
  111. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/METADATA +41 -11
  112. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/RECORD +116 -68
  113. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/entry_points.txt +1 -1
  114. webscout/Provider/HF_space/__init__.py +0 -0
  115. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  116. webscout/Provider/OPENAI/api.py +0 -1035
  117. webscout/Provider/TTI/artbit.py +0 -0
  118. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/WHEEL +0 -0
  119. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/licenses/LICENSE.md +0 -0
  120. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/x0gpt.py
@@ -1,367 +1,367 @@
-import time
-import uuid
-import requests
-import re
-import json
-from typing import List, Dict, Optional, Union, Generator, Any
-
-# Import base classes and utility structures
-from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
-from .utils import (
-    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-    ChatCompletionMessage, CompletionUsage, count_tokens
-)
-
-# Import LitAgent
-from webscout.litagent import LitAgent
-
-# --- X0GPT Client ---
-
-class Completions(BaseCompletions):
-    def __init__(self, client: 'X0GPT'):
-        self._client = client
-
-    def create(
-        self,
-        *,
-        model: str,
-        messages: List[Dict[str, str]],
-        max_tokens: Optional[int] = 2049,
-        stream: bool = False,
-        temperature: Optional[float] = None,
-        top_p: Optional[float] = None,
-        timeout: Optional[int] = None,
-        proxies: Optional[Dict[str, str]] = None,
-        **kwargs: Any
-    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-        """
-        Creates a model response for the given chat conversation.
-        Mimics openai.chat.completions.create
-        """
-        # Prepare the payload for X0GPT API
-        payload = {
-            "messages": messages,
-            "chatId": uuid.uuid4().hex,
-            "namespace": None
-        }
-
-        # Add optional parameters if provided
-        if max_tokens is not None and max_tokens > 0:
-            payload["max_tokens"] = max_tokens
-
-        if temperature is not None:
-            payload["temperature"] = temperature
-
-        if top_p is not None:
-            payload["top_p"] = top_p
-
-        # Add any additional parameters
-        payload.update(kwargs)
-
-        request_id = f"chatcmpl-{uuid.uuid4()}"
-        created_time = int(time.time())
-
-        if stream:
-            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
-        else:
-            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)
-
-    def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
-    ) -> Generator[ChatCompletionChunk, None, None]:
-        try:
-            response = self._client.session.post(
-                self._client.api_endpoint,
-                headers=self._client.headers,
-                json=payload,
-                stream=True,
-                timeout=timeout or self._client.timeout,
-                proxies=proxies or getattr(self._client, "proxies", None)
-            )
-
-            # Handle non-200 responses
-            if not response.ok:
-                raise IOError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
-
-            # Use count_tokens for prompt tokens
-            prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
-            completion_tokens = 0
-            total_tokens = 0
-
-            for line in response.iter_lines():
-                if line:
-                    decoded_line = line.decode('utf-8').strip()
-
-                    # X0GPT uses a different format, so we need to extract the content
-                    match = re.search(r'0:"(.*?)"', decoded_line)
-                    if match:
-                        content = match.group(1)
-
-                        # Format the content (replace escaped newlines)
-                        content = self._client.format_text(content)
-
-                        # Update token counts using count_tokens
-                        completion_tokens += count_tokens(content)
-                        total_tokens = prompt_tokens + completion_tokens
-
-                        # Create the delta object
-                        delta = ChoiceDelta(
-                            content=content,
-                            role="assistant",
-                            tool_calls=None
-                        )
-
-                        # Create the choice object
-                        choice = Choice(
-                            index=0,
-                            delta=delta,
-                            finish_reason=None,
-                            logprobs=None
-                        )
-
-                        # Create the chunk object
-                        chunk = ChatCompletionChunk(
-                            id=request_id,
-                            choices=[choice],
-                            created=created_time,
-                            model=model,
-                            system_fingerprint=None
-                        )
-
-                        # Set usage directly on the chunk object
-                        chunk.usage = {
-                            "prompt_tokens": prompt_tokens,
-                            "completion_tokens": completion_tokens,
-                            "total_tokens": total_tokens,
-                            "estimated_cost": None
-                        }
-
-                        # Return the chunk object with usage information
-                        yield chunk
-
-            # Final chunk with finish_reason="stop"
-            delta = ChoiceDelta(
-                content=None,
-                role=None,
-                tool_calls=None
-            )
-
-            choice = Choice(
-                index=0,
-                delta=delta,
-                finish_reason="stop",
-                logprobs=None
-            )
-
-            chunk = ChatCompletionChunk(
-                id=request_id,
-                choices=[choice],
-                created=created_time,
-                model=model,
-                system_fingerprint=None
-            )
-
-            # Set usage directly on the chunk object
-            chunk.usage = {
-                "prompt_tokens": prompt_tokens,
-                "completion_tokens": completion_tokens,
-                "total_tokens": total_tokens,
-                "estimated_cost": None
-            }
-
-            yield chunk
-
-        except Exception as e:
-            print(f"Error during X0GPT stream request: {e}")
-            raise IOError(f"X0GPT request failed: {e}") from e
-
-    def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
-    ) -> ChatCompletion:
-        try:
-            response = self._client.session.post(
-                self._client.api_endpoint,
-                headers=self._client.headers,
-                json=payload,
-                stream=True,
-                timeout=timeout or self._client.timeout,
-                proxies=proxies or getattr(self._client, "proxies", None)
-            )
-
-            # Handle non-200 responses
-            if not response.ok:
-                raise IOError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
-
-            # Collect the full response
-            full_text = ""
-            for line in response.iter_lines(decode_unicode=True):
-                if line:
-                    match = re.search(r'0:"(.*?)"', line)
-                    if match:
-                        content = match.group(1)
-                        full_text += content
-
-            # Format the text (replace escaped newlines)
-            full_text = self._client.format_text(full_text)
-
-            # Use count_tokens for accurate token counts
-            prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
-            completion_tokens = count_tokens(full_text)
-            total_tokens = prompt_tokens + completion_tokens
-
-            # Create the message object
-            message = ChatCompletionMessage(
-                role="assistant",
-                content=full_text
-            )
-
-            # Create the choice object
-            choice = Choice(
-                index=0,
-                message=message,
-                finish_reason="stop"
-            )
-
-            # Create the usage object
-            usage = CompletionUsage(
-                prompt_tokens=prompt_tokens,
-                completion_tokens=completion_tokens,
-                total_tokens=total_tokens
-            )
-
-            # Create the completion object
-            completion = ChatCompletion(
-                id=request_id,
-                choices=[choice],
-                created=created_time,
-                model=model,
-                usage=usage,
-            )
-
-            return completion
-
-        except Exception as e:
-            print(f"Error during X0GPT non-stream request: {e}")
-            raise IOError(f"X0GPT request failed: {e}") from e
-
-class Chat(BaseChat):
-    def __init__(self, client: 'X0GPT'):
-        self.completions = Completions(client)
-
-class X0GPT(OpenAICompatibleProvider):
-    """
-    OpenAI-compatible client for X0GPT API.
-
-    Usage:
-        client = X0GPT()
-        response = client.chat.completions.create(
-            model="X0GPT",
-            messages=[{"role": "user", "content": "Hello!"}]
-        )
-    """
-
-    AVAILABLE_MODELS = ["X0GPT"]
-
-    def __init__(
-        self,
-        browser: str = "chrome"
-    ):
-        """
-        Initialize the X0GPT client.
-
-        Args:
-            browser: Browser to emulate in user agent
-        """
-        self.timeout = None
-        self.api_endpoint = "https://x0-gpt.devwtf.in/api/stream/reply"
-        self.session = requests.Session()
-
-        # Initialize LitAgent for user agent generation
-        agent = LitAgent()
-        self.fingerprint = agent.generate_fingerprint(browser)
-
-        self.headers = {
-            "authority": "x0-gpt.devwtf.in",
-            "method": "POST",
-            "path": "/api/stream/reply",
-            "scheme": "https",
-            "accept": self.fingerprint["accept"],
-            "accept-encoding": "gzip, deflate, br, zstd",
-            "accept-language": self.fingerprint["accept_language"],
-            "content-type": "application/json",
-            "dnt": "1",
-            "origin": "https://x0-gpt.devwtf.in",
-            "priority": "u=1, i",
-            "referer": "https://x0-gpt.devwtf.in/chat",
-            "sec-ch-ua": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": f'"{self.fingerprint["platform"]}"',
-            "user-agent": self.fingerprint["user_agent"]
-        }
-
-        self.session.headers.update(self.headers)
-
-        # Initialize the chat interface
-        self.chat = Chat(self)
-
-    @property
-    def models(self):
-        class _ModelList:
-            def list(inner_self):
-                return X0GPT.AVAILABLE_MODELS
-        return _ModelList()
-
-    def format_text(self, text: str) -> str:
-        """
-        Format text by replacing escaped newlines with actual newlines.
-
-        Args:
-            text: Text to format
-
-        Returns:
-            Formatted text
-        """
-        # Use a more comprehensive approach to handle all escape sequences
-        try:
-            # First handle double backslashes to avoid issues
-            text = text.replace('\\\\', '\\')
-
-            # Handle common escape sequences
-            text = text.replace('\\n', '\n')
-            text = text.replace('\\r', '\r')
-            text = text.replace('\\t', '\t')
-            text = text.replace('\\"', '"')
-            text = text.replace("\\'", "'")
-
-            # Handle any remaining escape sequences using JSON decoding
-            # This is a fallback in case there are other escape sequences
-            try:
-                # Add quotes to make it a valid JSON string
-                json_str = f'"{text}"'
-                # Use json module to decode all escape sequences
-                decoded = json.loads(json_str)
-                return decoded
-            except json.JSONDecodeError:
-                # If JSON decoding fails, return the text with the replacements we've already done
-                return text
-        except Exception as e:
-            # If any error occurs, return the original text
-            print(f"Warning: Error formatting text: {e}")
-            return text
-
-    def convert_model_name(self, model: str) -> str:
-        """
-        Convert model names to ones supported by X0GPT.
-
-        Args:
-            model: Model name to convert
-
-        Returns:
-            X0GPT model name
-        """
-        # X0GPT doesn't actually use model names, but we'll keep this for compatibility
-        return model
+import time
+import uuid
+import requests
+import re
+import json
+from typing import List, Dict, Optional, Union, Generator, Any
+
+# Import base classes and utility structures
+from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from .utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage, count_tokens
+)
+
+# Import LitAgent
+from webscout.litagent import LitAgent
+
+# --- X0GPT Client ---
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'X0GPT'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = 2049,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation.
+        Mimics openai.chat.completions.create
+        """
+        # Prepare the payload for X0GPT API
+        payload = {
+            "messages": messages,
+            "chatId": uuid.uuid4().hex,
+            "namespace": None
+        }
+
+        # Add optional parameters if provided
+        if max_tokens is not None and max_tokens > 0:
+            payload["max_tokens"] = max_tokens
+
+        if temperature is not None:
+            payload["temperature"] = temperature
+
+        if top_p is not None:
+            payload["top_p"] = top_p
+
+        # Add any additional parameters
+        payload.update(kwargs)
+
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        if stream:
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
+        else:
+            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        try:
+            response = self._client.session.post(
+                self._client.api_endpoint,
+                headers=self._client.headers,
+                json=payload,
+                stream=True,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
+            )
+
+            # Handle non-200 responses
+            if not response.ok:
+                raise IOError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            # Use count_tokens for prompt tokens
+            prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
+            completion_tokens = 0
+            total_tokens = 0
+
+            for line in response.iter_lines():
+                if line:
+                    decoded_line = line.decode('utf-8').strip()
+
+                    # X0GPT uses a different format, so we need to extract the content
+                    match = re.search(r'0:"(.*?)"', decoded_line)
+                    if match:
+                        content = match.group(1)
+
+                        # Format the content (replace escaped newlines)
+                        content = self._client.format_text(content)
+
+                        # Update token counts using count_tokens
+                        completion_tokens += count_tokens(content)
+                        total_tokens = prompt_tokens + completion_tokens
+
+                        # Create the delta object
+                        delta = ChoiceDelta(
+                            content=content,
+                            role="assistant",
+                            tool_calls=None
+                        )
+
+                        # Create the choice object
+                        choice = Choice(
+                            index=0,
+                            delta=delta,
+                            finish_reason=None,
+                            logprobs=None
+                        )
+
+                        # Create the chunk object
+                        chunk = ChatCompletionChunk(
+                            id=request_id,
+                            choices=[choice],
+                            created=created_time,
+                            model=model,
+                            system_fingerprint=None
+                        )
+
+                        # Set usage directly on the chunk object
+                        chunk.usage = {
+                            "prompt_tokens": prompt_tokens,
+                            "completion_tokens": completion_tokens,
+                            "total_tokens": total_tokens,
+                            "estimated_cost": None
+                        }
+
+                        # Return the chunk object with usage information
+                        yield chunk
+
+            # Final chunk with finish_reason="stop"
+            delta = ChoiceDelta(
+                content=None,
+                role=None,
+                tool_calls=None
+            )
+
+            choice = Choice(
+                index=0,
+                delta=delta,
+                finish_reason="stop",
+                logprobs=None
+            )
+
+            chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                system_fingerprint=None
+            )
+
+            # Set usage directly on the chunk object
+            chunk.usage = {
+                "prompt_tokens": prompt_tokens,
+                "completion_tokens": completion_tokens,
+                "total_tokens": total_tokens,
+                "estimated_cost": None
+            }
+
+            yield chunk
+
+        except Exception as e:
+            print(f"Error during X0GPT stream request: {e}")
+            raise IOError(f"X0GPT request failed: {e}") from e
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> ChatCompletion:
+        try:
+            response = self._client.session.post(
+                self._client.api_endpoint,
+                headers=self._client.headers,
+                json=payload,
+                stream=True,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
+            )
+
+            # Handle non-200 responses
+            if not response.ok:
+                raise IOError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            # Collect the full response
+            full_text = ""
+            for line in response.iter_lines(decode_unicode=True):
+                if line:
+                    match = re.search(r'0:"(.*?)"', line)
+                    if match:
+                        content = match.group(1)
+                        full_text += content
+
+            # Format the text (replace escaped newlines)
+            full_text = self._client.format_text(full_text)
+
+            # Use count_tokens for accurate token counts
+            prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
+            completion_tokens = count_tokens(full_text)
+            total_tokens = prompt_tokens + completion_tokens
+
+            # Create the message object
+            message = ChatCompletionMessage(
+                role="assistant",
+                content=full_text
+            )
+
+            # Create the choice object
+            choice = Choice(
+                index=0,
+                message=message,
+                finish_reason="stop"
+            )
+
+            # Create the usage object
+            usage = CompletionUsage(
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                total_tokens=total_tokens
+            )
+
+            # Create the completion object
+            completion = ChatCompletion(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                usage=usage,
+            )
+
+            return completion
+
+        except Exception as e:
+            print(f"Error during X0GPT non-stream request: {e}")
+            raise IOError(f"X0GPT request failed: {e}") from e
+
+class Chat(BaseChat):
+    def __init__(self, client: 'X0GPT'):
+        self.completions = Completions(client)
+
+class X0GPT(OpenAICompatibleProvider):
+    """
+    OpenAI-compatible client for X0GPT API.
+
+    Usage:
+        client = X0GPT()
+        response = client.chat.completions.create(
+            model="X0GPT",
+            messages=[{"role": "user", "content": "Hello!"}]
+        )
+    """
+
+    AVAILABLE_MODELS = ["X0GPT"]
+
+    def __init__(
+        self,
+        browser: str = "chrome"
+    ):
+        """
+        Initialize the X0GPT client.
+
+        Args:
+            browser: Browser to emulate in user agent
+        """
+        self.timeout = None
+        self.api_endpoint = "https://x0-gpt.devwtf.in/api/stream/reply"
+        self.session = requests.Session()
+
+        # Initialize LitAgent for user agent generation
+        agent = LitAgent()
+        self.fingerprint = agent.generate_fingerprint(browser)
+
+        self.headers = {
+            "authority": "x0-gpt.devwtf.in",
+            "method": "POST",
+            "path": "/api/stream/reply",
+            "scheme": "https",
+            "accept": self.fingerprint["accept"],
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": self.fingerprint["accept_language"],
+            "content-type": "application/json",
+            "dnt": "1",
+            "origin": "https://x0-gpt.devwtf.in",
+            "priority": "u=1, i",
+            "referer": "https://x0-gpt.devwtf.in/chat",
+            "sec-ch-ua": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": f'"{self.fingerprint["platform"]}"',
+            "user-agent": self.fingerprint["user_agent"]
+        }
+
+        self.session.headers.update(self.headers)
+
+        # Initialize the chat interface
+        self.chat = Chat(self)
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return X0GPT.AVAILABLE_MODELS
+        return _ModelList()
+
+    def format_text(self, text: str) -> str:
+        """
+        Format text by replacing escaped newlines with actual newlines.
+
+        Args:
+            text: Text to format
+
+        Returns:
+            Formatted text
+        """
+        # Use a more comprehensive approach to handle all escape sequences
+        try:
+            # First handle double backslashes to avoid issues
+            text = text.replace('\\\\', '\\')
+
+            # Handle common escape sequences
+            text = text.replace('\\n', '\n')
+            text = text.replace('\\r', '\r')
+            text = text.replace('\\t', '\t')
+            text = text.replace('\\"', '"')
+            text = text.replace("\\'", "'")
+
+            # Handle any remaining escape sequences using JSON decoding
+            # This is a fallback in case there are other escape sequences
+            try:
+                # Add quotes to make it a valid JSON string
+                json_str = f'"{text}"'
+                # Use json module to decode all escape sequences
+                decoded = json.loads(json_str)
+                return decoded
+            except json.JSONDecodeError:
+                # If JSON decoding fails, return the text with the replacements we've already done
+                return text
+        except Exception as e:
+            # If any error occurs, return the original text
+            print(f"Warning: Error formatting text: {e}")
+            return text
+
+    def convert_model_name(self, model: str) -> str:
+        """
+        Convert model names to ones supported by X0GPT.
+
+        Args:
+            model: Model name to convert
+
+        Returns:
+            X0GPT model name
+        """
+        # X0GPT doesn't actually use model names, but we'll keep this for compatibility
+        return model
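
For reference, a minimal usage sketch of the X0GPT client shown in the diff above, following the class's own docstring and the create() signature. This is illustrative, not part of the release: the import path is assumed from the file's location in this diff (it may also be re-exported via webscout.Provider.OPENAI's __init__), and the prompt text is an example.

# Minimal usage sketch (assumed import path; prompt is illustrative)
from webscout.Provider.OPENAI.x0gpt import X0GPT

client = X0GPT()

# Non-streaming call: returns a ChatCompletion with a single Choice
response = client.chat.completions.create(
    model="X0GPT",
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)

# Streaming call: yields ChatCompletionChunk objects; text arrives in deltas
for chunk in client.chat.completions.create(
    model="X0GPT",
    messages=[{"role": "user", "content": "Hello!"}],
    stream=True,
):
    delta = chunk.choices[0].delta
    if delta.content:
        print(delta.content, end="", flush=True)

Note that the provider ignores the model name (convert_model_name() is a pass-through and AVAILABLE_MODELS lists only "X0GPT"), so the model argument exists purely for OpenAI-client compatibility.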