webscout 8.2.8__py3-none-any.whl → 8.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (184)
  1. webscout/AIauto.py +32 -14
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +153 -35
  22. webscout/Provider/Deepinfra.py +339 -339
  23. webscout/Provider/ExaChat.py +358 -358
  24. webscout/Provider/Gemini.py +169 -169
  25. webscout/Provider/GithubChat.py +1 -2
  26. webscout/Provider/Glider.py +3 -3
  27. webscout/Provider/HeckAI.py +171 -81
  28. webscout/Provider/OPENAI/BLACKBOXAI.py +766 -735
  29. webscout/Provider/OPENAI/Cloudflare.py +7 -7
  30. webscout/Provider/OPENAI/FreeGemini.py +6 -5
  31. webscout/Provider/OPENAI/NEMOTRON.py +8 -20
  32. webscout/Provider/OPENAI/Qwen3.py +283 -0
  33. webscout/Provider/OPENAI/README.md +952 -1253
  34. webscout/Provider/OPENAI/TwoAI.py +357 -0
  35. webscout/Provider/OPENAI/__init__.py +5 -1
  36. webscout/Provider/OPENAI/ai4chat.py +40 -40
  37. webscout/Provider/OPENAI/api.py +808 -649
  38. webscout/Provider/OPENAI/c4ai.py +3 -3
  39. webscout/Provider/OPENAI/chatgpt.py +555 -555
  40. webscout/Provider/OPENAI/chatgptclone.py +493 -487
  41. webscout/Provider/OPENAI/chatsandbox.py +4 -3
  42. webscout/Provider/OPENAI/copilot.py +242 -0
  43. webscout/Provider/OPENAI/deepinfra.py +5 -2
  44. webscout/Provider/OPENAI/e2b.py +63 -5
  45. webscout/Provider/OPENAI/exaai.py +416 -410
  46. webscout/Provider/OPENAI/exachat.py +444 -443
  47. webscout/Provider/OPENAI/freeaichat.py +2 -2
  48. webscout/Provider/OPENAI/glider.py +5 -2
  49. webscout/Provider/OPENAI/groq.py +5 -2
  50. webscout/Provider/OPENAI/heckai.py +308 -307
  51. webscout/Provider/OPENAI/mcpcore.py +8 -2
  52. webscout/Provider/OPENAI/multichat.py +4 -4
  53. webscout/Provider/OPENAI/netwrck.py +6 -5
  54. webscout/Provider/OPENAI/oivscode.py +287 -0
  55. webscout/Provider/OPENAI/opkfc.py +496 -496
  56. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  57. webscout/Provider/OPENAI/scirachat.py +15 -9
  58. webscout/Provider/OPENAI/sonus.py +304 -303
  59. webscout/Provider/OPENAI/standardinput.py +433 -433
  60. webscout/Provider/OPENAI/textpollinations.py +4 -4
  61. webscout/Provider/OPENAI/toolbaz.py +413 -413
  62. webscout/Provider/OPENAI/typefully.py +3 -3
  63. webscout/Provider/OPENAI/typegpt.py +11 -5
  64. webscout/Provider/OPENAI/uncovrAI.py +463 -462
  65. webscout/Provider/OPENAI/utils.py +90 -79
  66. webscout/Provider/OPENAI/venice.py +431 -425
  67. webscout/Provider/OPENAI/wisecat.py +387 -381
  68. webscout/Provider/OPENAI/writecream.py +3 -3
  69. webscout/Provider/OPENAI/x0gpt.py +365 -378
  70. webscout/Provider/OPENAI/yep.py +39 -13
  71. webscout/Provider/TTI/README.md +55 -101
  72. webscout/Provider/TTI/__init__.py +4 -9
  73. webscout/Provider/TTI/aiarta.py +365 -0
  74. webscout/Provider/TTI/artbit.py +0 -0
  75. webscout/Provider/TTI/base.py +64 -0
  76. webscout/Provider/TTI/fastflux.py +200 -0
  77. webscout/Provider/TTI/magicstudio.py +201 -0
  78. webscout/Provider/TTI/piclumen.py +203 -0
  79. webscout/Provider/TTI/pixelmuse.py +225 -0
  80. webscout/Provider/TTI/pollinations.py +221 -0
  81. webscout/Provider/TTI/utils.py +11 -0
  82. webscout/Provider/TTS/__init__.py +2 -1
  83. webscout/Provider/TTS/base.py +159 -159
  84. webscout/Provider/TTS/openai_fm.py +129 -0
  85. webscout/Provider/TextPollinationsAI.py +308 -308
  86. webscout/Provider/TwoAI.py +239 -44
  87. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  88. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  89. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  90. webscout/Provider/Writecream.py +246 -246
  91. webscout/Provider/__init__.py +2 -0
  92. webscout/Provider/ai4chat.py +33 -8
  93. webscout/Provider/koala.py +169 -169
  94. webscout/Provider/oivscode.py +309 -0
  95. webscout/Provider/samurai.py +3 -2
  96. webscout/Provider/typegpt.py +3 -3
  97. webscout/Provider/uncovr.py +368 -368
  98. webscout/client.py +70 -0
  99. webscout/litprinter/__init__.py +58 -58
  100. webscout/optimizers.py +419 -419
  101. webscout/scout/README.md +3 -1
  102. webscout/scout/core/crawler.py +134 -64
  103. webscout/scout/core/scout.py +148 -109
  104. webscout/scout/element.py +106 -88
  105. webscout/swiftcli/Readme.md +323 -323
  106. webscout/swiftcli/plugins/manager.py +9 -2
  107. webscout/version.py +1 -1
  108. webscout/zeroart/__init__.py +134 -134
  109. webscout/zeroart/effects.py +100 -100
  110. webscout/zeroart/fonts.py +1238 -1238
  111. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/METADATA +159 -35
  112. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/RECORD +116 -161
  113. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
  114. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
  115. webscout/Litlogger/Readme.md +0 -175
  116. webscout/Litlogger/core/__init__.py +0 -6
  117. webscout/Litlogger/core/level.py +0 -23
  118. webscout/Litlogger/core/logger.py +0 -165
  119. webscout/Litlogger/handlers/__init__.py +0 -12
  120. webscout/Litlogger/handlers/console.py +0 -33
  121. webscout/Litlogger/handlers/file.py +0 -143
  122. webscout/Litlogger/handlers/network.py +0 -173
  123. webscout/Litlogger/styles/__init__.py +0 -7
  124. webscout/Litlogger/styles/colors.py +0 -249
  125. webscout/Litlogger/styles/formats.py +0 -458
  126. webscout/Litlogger/styles/text.py +0 -87
  127. webscout/Litlogger/utils/__init__.py +0 -6
  128. webscout/Litlogger/utils/detectors.py +0 -153
  129. webscout/Litlogger/utils/formatters.py +0 -200
  130. webscout/Provider/TTI/AiForce/README.md +0 -159
  131. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  132. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  133. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  134. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  135. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  136. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  137. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  138. webscout/Provider/TTI/ImgSys/README.md +0 -174
  139. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  140. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  141. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  142. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  143. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  144. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  145. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  146. webscout/Provider/TTI/Nexra/README.md +0 -155
  147. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  148. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  149. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  150. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  151. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  152. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  153. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  154. webscout/Provider/TTI/aiarta/README.md +0 -134
  155. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  156. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  157. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  158. webscout/Provider/TTI/artbit/README.md +0 -100
  159. webscout/Provider/TTI/artbit/__init__.py +0 -22
  160. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  161. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  162. webscout/Provider/TTI/fastflux/README.md +0 -129
  163. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  164. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  165. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  166. webscout/Provider/TTI/huggingface/README.md +0 -114
  167. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  168. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  169. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  170. webscout/Provider/TTI/piclumen/README.md +0 -161
  171. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  172. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  173. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  174. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  175. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  176. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  177. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  178. webscout/Provider/TTI/talkai/README.md +0 -139
  179. webscout/Provider/TTI/talkai/__init__.py +0 -4
  180. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  181. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  182. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  183. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
  184. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/x0gpt.py
@@ -1,378 +1,365 @@
-import time
-import uuid
-import requests
-import re
-import json
-from typing import List, Dict, Optional, Union, Generator, Any
-
-# Import base classes and utility structures
-from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
-from .utils import (
-    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-    ChatCompletionMessage, CompletionUsage
-)
-
-# Import LitAgent
-from webscout.litagent import LitAgent
-
-# --- X0GPT Client ---
-
-class Completions(BaseCompletions):
-    def __init__(self, client: 'X0GPT'):
-        self._client = client
-
-    def create(
-        self,
-        *,
-        model: str,
-        messages: List[Dict[str, str]],
-        max_tokens: Optional[int] = 2049,
-        stream: bool = False,
-        temperature: Optional[float] = None,
-        top_p: Optional[float] = None,
-        **kwargs: Any
-    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-        """
-        Creates a model response for the given chat conversation.
-        Mimics openai.chat.completions.create
-        """
-        # Prepare the payload for X0GPT API
-        payload = {
-            "messages": messages,
-            "chatId": uuid.uuid4().hex,
-            "namespace": None
-        }
-
-        # Add optional parameters if provided
-        if max_tokens is not None and max_tokens > 0:
-            payload["max_tokens"] = max_tokens
-
-        if temperature is not None:
-            payload["temperature"] = temperature
-
-        if top_p is not None:
-            payload["top_p"] = top_p
-
-        # Add any additional parameters
-        payload.update(kwargs)
-
-        request_id = f"chatcmpl-{uuid.uuid4()}"
-        created_time = int(time.time())
-
-        if stream:
-            return self._create_stream(request_id, created_time, model, payload)
-        else:
-            return self._create_non_stream(request_id, created_time, model, payload)
-
-    def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-    ) -> Generator[ChatCompletionChunk, None, None]:
-        try:
-            response = self._client.session.post(
-                self._client.api_endpoint,
-                headers=self._client.headers,
-                json=payload,
-                stream=True,
-                timeout=self._client.timeout
-            )
-
-            # Handle non-200 responses
-            if not response.ok:
-                raise IOError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
-
-            # Track token usage across chunks
-            prompt_tokens = 0
-            completion_tokens = 0
-            total_tokens = 0
-
-            # Estimate prompt tokens based on message length
-            for msg in payload.get("messages", []):
-                prompt_tokens += len(msg.get("content", "").split())
-
-            for line in response.iter_lines():
-                if line:
-                    decoded_line = line.decode('utf-8').strip()
-
-                    # X0GPT uses a different format, so we need to extract the content
-                    match = re.search(r'0:"(.*?)"', decoded_line)
-                    if match:
-                        content = match.group(1)
-
-                        # Format the content (replace escaped newlines)
-                        content = self._client.format_text(content)
-
-                        # Update token counts
-                        completion_tokens += 1
-                        total_tokens = prompt_tokens + completion_tokens
-
-                        # Create the delta object
-                        delta = ChoiceDelta(
-                            content=content,
-                            role="assistant",
-                            tool_calls=None
-                        )
-
-                        # Create the choice object
-                        choice = Choice(
-                            index=0,
-                            delta=delta,
-                            finish_reason=None,
-                            logprobs=None
-                        )
-
-                        # Create the chunk object
-                        chunk = ChatCompletionChunk(
-                            id=request_id,
-                            choices=[choice],
-                            created=created_time,
-                            model=model,
-                            system_fingerprint=None
-                        )
-
-                        # Convert to dict for proper formatting
-                        chunk_dict = chunk.to_dict()
-
-                        # Add usage information to match OpenAI format
-                        usage_dict = {
-                            "prompt_tokens": prompt_tokens,
-                            "completion_tokens": completion_tokens,
-                            "total_tokens": total_tokens,
-                            "estimated_cost": None
-                        }
-
-                        chunk_dict["usage"] = usage_dict
-
-                        # Return the chunk object for internal processing
-                        yield chunk
-
-            # Final chunk with finish_reason="stop"
-            delta = ChoiceDelta(
-                content=None,
-                role=None,
-                tool_calls=None
-            )
-
-            choice = Choice(
-                index=0,
-                delta=delta,
-                finish_reason="stop",
-                logprobs=None
-            )
-
-            chunk = ChatCompletionChunk(
-                id=request_id,
-                choices=[choice],
-                created=created_time,
-                model=model,
-                system_fingerprint=None
-            )
-
-            chunk_dict = chunk.to_dict()
-            chunk_dict["usage"] = {
-                "prompt_tokens": prompt_tokens,
-                "completion_tokens": completion_tokens,
-                "total_tokens": total_tokens,
-                "estimated_cost": None
-            }
-
-            yield chunk
-
-        except Exception as e:
-            print(f"Error during X0GPT stream request: {e}")
-            raise IOError(f"X0GPT request failed: {e}") from e
-
-    def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-    ) -> ChatCompletion:
-        try:
-            # For non-streaming, we still use streaming internally to collect the full response
-            response = self._client.session.post(
-                self._client.api_endpoint,
-                headers=self._client.headers,
-                json=payload,
-                stream=True,
-                timeout=self._client.timeout
-            )
-
-            # Handle non-200 responses
-            if not response.ok:
-                raise IOError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
-
-            # Collect the full response
-            full_text = ""
-            for line in response.iter_lines(decode_unicode=True):
-                if line:
-                    match = re.search(r'0:"(.*?)"', line)
-                    if match:
-                        content = match.group(1)
-                        full_text += content
-
-            # Format the text (replace escaped newlines)
-            full_text = self._client.format_text(full_text)
-
-            # Estimate token counts
-            prompt_tokens = 0
-            for msg in payload.get("messages", []):
-                prompt_tokens += len(msg.get("content", "").split())
-
-            completion_tokens = len(full_text.split())
-            total_tokens = prompt_tokens + completion_tokens
-
-            # Create the message object
-            message = ChatCompletionMessage(
-                role="assistant",
-                content=full_text
-            )
-
-            # Create the choice object
-            choice = Choice(
-                index=0,
-                message=message,
-                finish_reason="stop"
-            )
-
-            # Create the usage object
-            usage = CompletionUsage(
-                prompt_tokens=prompt_tokens,
-                completion_tokens=completion_tokens,
-                total_tokens=total_tokens
-            )
-
-            # Create the completion object
-            completion = ChatCompletion(
-                id=request_id,
-                choices=[choice],
-                created=created_time,
-                model=model,
-                usage=usage,
-            )
-
-            return completion
-
-        except Exception as e:
-            print(f"Error during X0GPT non-stream request: {e}")
-            raise IOError(f"X0GPT request failed: {e}") from e
-
-class Chat(BaseChat):
-    def __init__(self, client: 'X0GPT'):
-        self.completions = Completions(client)
-
-class X0GPT(OpenAICompatibleProvider):
-    """
-    OpenAI-compatible client for X0GPT API.
-
-    Usage:
-        client = X0GPT()
-        response = client.chat.completions.create(
-            model="gpt-4",
-            messages=[{"role": "user", "content": "Hello!"}]
-        )
-    """
-
-    AVAILABLE_MODELS = ["gpt-4", "gpt-3.5-turbo"]
-
-    def __init__(
-        self,
-        timeout: Optional[int] = None,
-        browser: str = "chrome"
-    ):
-        """
-        Initialize the X0GPT client.
-
-        Args:
-            timeout: Request timeout in seconds (None for no timeout)
-            browser: Browser to emulate in user agent
-        """
-        self.timeout = timeout
-        self.api_endpoint = "https://x0-gpt.devwtf.in/api/stream/reply"
-        self.session = requests.Session()
-
-        # Initialize LitAgent for user agent generation
-        agent = LitAgent()
-        self.fingerprint = agent.generate_fingerprint(browser)
-
-        self.headers = {
-            "authority": "x0-gpt.devwtf.in",
-            "method": "POST",
-            "path": "/api/stream/reply",
-            "scheme": "https",
-            "accept": self.fingerprint["accept"],
-            "accept-encoding": "gzip, deflate, br, zstd",
-            "accept-language": self.fingerprint["accept_language"],
-            "content-type": "application/json",
-            "dnt": "1",
-            "origin": "https://x0-gpt.devwtf.in",
-            "priority": "u=1, i",
-            "referer": "https://x0-gpt.devwtf.in/chat",
-            "sec-ch-ua": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": f'"{self.fingerprint["platform"]}"',
-            "user-agent": self.fingerprint["user_agent"]
-        }
-
-        self.session.headers.update(self.headers)
-
-        # Initialize the chat interface
-        self.chat = Chat(self)
-
-    @property
-    def models(self):
-        class _ModelList:
-            def list(inner_self):
-                return X0GPT.AVAILABLE_MODELS
-        return _ModelList()
-
-    def format_text(self, text: str) -> str:
-        """
-        Format text by replacing escaped newlines with actual newlines.
-
-        Args:
-            text: Text to format
-
-        Returns:
-            Formatted text
-        """
-        # Use a more comprehensive approach to handle all escape sequences
-        try:
-            # First handle double backslashes to avoid issues
-            text = text.replace('\\\\', '\\')
-
-            # Handle common escape sequences
-            text = text.replace('\\n', '\n')
-            text = text.replace('\\r', '\r')
-            text = text.replace('\\t', '\t')
-            text = text.replace('\\"', '"')
-            text = text.replace("\\'", "'")
-
-            # Handle any remaining escape sequences using JSON decoding
-            # This is a fallback in case there are other escape sequences
-            try:
-                # Add quotes to make it a valid JSON string
-                json_str = f'"{text}"'
-                # Use json module to decode all escape sequences
-                decoded = json.loads(json_str)
-                return decoded
-            except json.JSONDecodeError:
-                # If JSON decoding fails, return the text with the replacements we've already done
-                return text
-        except Exception as e:
-            # If any error occurs, return the original text
-            print(f"Warning: Error formatting text: {e}")
-            return text
-
-    def convert_model_name(self, model: str) -> str:
-        """
-        Convert model names to ones supported by X0GPT.
-
-        Args:
-            model: Model name to convert
-
-        Returns:
-            X0GPT model name
-        """
-        # X0GPT doesn't actually use model names, but we'll keep this for compatibility
-        return model
+import time
+import uuid
+import requests
+import re
+import json
+from typing import List, Dict, Optional, Union, Generator, Any
+
+# Import base classes and utility structures
+from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from .utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage, count_tokens
+)
+
+# Import LitAgent
+from webscout.litagent import LitAgent
+
+# --- X0GPT Client ---
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'X0GPT'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = 2049,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation.
+        Mimics openai.chat.completions.create
+        """
+        # Prepare the payload for X0GPT API
+        payload = {
+            "messages": messages,
+            "chatId": uuid.uuid4().hex,
+            "namespace": None
+        }
+
+        # Add optional parameters if provided
+        if max_tokens is not None and max_tokens > 0:
+            payload["max_tokens"] = max_tokens
+
+        if temperature is not None:
+            payload["temperature"] = temperature
+
+        if top_p is not None:
+            payload["top_p"] = top_p
+
+        # Add any additional parameters
+        payload.update(kwargs)
+
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        if stream:
+            return self._create_stream(request_id, created_time, model, payload)
+        else:
+            return self._create_non_stream(request_id, created_time, model, payload)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        try:
+            response = self._client.session.post(
+                self._client.api_endpoint,
+                headers=self._client.headers,
+                json=payload,
+                stream=True,
+                timeout=self._client.timeout
+            )
+
+            # Handle non-200 responses
+            if not response.ok:
+                raise IOError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            # Use count_tokens for prompt tokens
+            prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
+            completion_tokens = 0
+            total_tokens = 0
+
+            for line in response.iter_lines():
+                if line:
+                    decoded_line = line.decode('utf-8').strip()
+
+                    # X0GPT uses a different format, so we need to extract the content
+                    match = re.search(r'0:"(.*?)"', decoded_line)
+                    if match:
+                        content = match.group(1)
+
+                        # Format the content (replace escaped newlines)
+                        content = self._client.format_text(content)
+
+                        # Update token counts using count_tokens
+                        completion_tokens += count_tokens(content)
+                        total_tokens = prompt_tokens + completion_tokens
+
+                        # Create the delta object
+                        delta = ChoiceDelta(
+                            content=content,
+                            role="assistant",
+                            tool_calls=None
+                        )
+
+                        # Create the choice object
+                        choice = Choice(
+                            index=0,
+                            delta=delta,
+                            finish_reason=None,
+                            logprobs=None
+                        )
+
+                        # Create the chunk object
+                        chunk = ChatCompletionChunk(
+                            id=request_id,
+                            choices=[choice],
+                            created=created_time,
+                            model=model,
+                            system_fingerprint=None
+                        )
+
+                        # Set usage directly on the chunk object
+                        chunk.usage = {
+                            "prompt_tokens": prompt_tokens,
+                            "completion_tokens": completion_tokens,
+                            "total_tokens": total_tokens,
+                            "estimated_cost": None
+                        }
+
+                        # Return the chunk object with usage information
+                        yield chunk
+
+            # Final chunk with finish_reason="stop"
+            delta = ChoiceDelta(
+                content=None,
+                role=None,
+                tool_calls=None
+            )
+
+            choice = Choice(
+                index=0,
+                delta=delta,
+                finish_reason="stop",
+                logprobs=None
+            )
+
+            chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                system_fingerprint=None
+            )
+
+            # Set usage directly on the chunk object
+            chunk.usage = {
+                "prompt_tokens": prompt_tokens,
+                "completion_tokens": completion_tokens,
+                "total_tokens": total_tokens,
+                "estimated_cost": None
+            }
+
+            yield chunk
+
+        except Exception as e:
+            print(f"Error during X0GPT stream request: {e}")
+            raise IOError(f"X0GPT request failed: {e}") from e
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+    ) -> ChatCompletion:
+        try:
+            response = self._client.session.post(
+                self._client.api_endpoint,
+                headers=self._client.headers,
+                json=payload,
+                stream=True,
+                timeout=self._client.timeout
+            )
+
+            # Handle non-200 responses
+            if not response.ok:
+                raise IOError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            # Collect the full response
+            full_text = ""
+            for line in response.iter_lines(decode_unicode=True):
+                if line:
+                    match = re.search(r'0:"(.*?)"', line)
+                    if match:
+                        content = match.group(1)
+                        full_text += content
+
+            # Format the text (replace escaped newlines)
+            full_text = self._client.format_text(full_text)
+
+            # Use count_tokens for accurate token counts
+            prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
+            completion_tokens = count_tokens(full_text)
+            total_tokens = prompt_tokens + completion_tokens
+
+            # Create the message object
+            message = ChatCompletionMessage(
+                role="assistant",
+                content=full_text
+            )
+
+            # Create the choice object
+            choice = Choice(
+                index=0,
+                message=message,
+                finish_reason="stop"
+            )
+
+            # Create the usage object
+            usage = CompletionUsage(
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                total_tokens=total_tokens
+            )
+
+            # Create the completion object
+            completion = ChatCompletion(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                usage=usage,
+            )
+
+            return completion
+
+        except Exception as e:
+            print(f"Error during X0GPT non-stream request: {e}")
+            raise IOError(f"X0GPT request failed: {e}") from e
+
+class Chat(BaseChat):
+    def __init__(self, client: 'X0GPT'):
+        self.completions = Completions(client)
+
+class X0GPT(OpenAICompatibleProvider):
+    """
+    OpenAI-compatible client for X0GPT API.
+
+    Usage:
+        client = X0GPT()
+        response = client.chat.completions.create(
+            model="X0GPT",
+            messages=[{"role": "user", "content": "Hello!"}]
+        )
+    """
+
+    AVAILABLE_MODELS = ["X0GPT"]
+
+    def __init__(
+        self,
+        timeout: Optional[int] = None,
+        browser: str = "chrome"
+    ):
+        """
+        Initialize the X0GPT client.
+
+        Args:
+            timeout: Request timeout in seconds (None for no timeout)
+            browser: Browser to emulate in user agent
+        """
+        self.timeout = timeout
+        self.api_endpoint = "https://x0-gpt.devwtf.in/api/stream/reply"
+        self.session = requests.Session()
+
+        # Initialize LitAgent for user agent generation
+        agent = LitAgent()
+        self.fingerprint = agent.generate_fingerprint(browser)
+
+        self.headers = {
+            "authority": "x0-gpt.devwtf.in",
+            "method": "POST",
+            "path": "/api/stream/reply",
+            "scheme": "https",
+            "accept": self.fingerprint["accept"],
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": self.fingerprint["accept_language"],
+            "content-type": "application/json",
+            "dnt": "1",
+            "origin": "https://x0-gpt.devwtf.in",
+            "priority": "u=1, i",
+            "referer": "https://x0-gpt.devwtf.in/chat",
+            "sec-ch-ua": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": f'"{self.fingerprint["platform"]}"',
+            "user-agent": self.fingerprint["user_agent"]
+        }
+
+        self.session.headers.update(self.headers)
+
+        # Initialize the chat interface
+        self.chat = Chat(self)
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return X0GPT.AVAILABLE_MODELS
+        return _ModelList()
+
+    def format_text(self, text: str) -> str:
+        """
+        Format text by replacing escaped newlines with actual newlines.
+
+        Args:
+            text: Text to format
+
+        Returns:
+            Formatted text
+        """
+        # Use a more comprehensive approach to handle all escape sequences
+        try:
+            # First handle double backslashes to avoid issues
+            text = text.replace('\\\\', '\\')

+            # Handle common escape sequences
+            text = text.replace('\\n', '\n')
+            text = text.replace('\\r', '\r')
+            text = text.replace('\\t', '\t')
+            text = text.replace('\\"', '"')
+            text = text.replace("\\'", "'")
+
+            # Handle any remaining escape sequences using JSON decoding
+            # This is a fallback in case there are other escape sequences
+            try:
+                # Add quotes to make it a valid JSON string
+                json_str = f'"{text}"'
+                # Use json module to decode all escape sequences
+                decoded = json.loads(json_str)
+                return decoded
+            except json.JSONDecodeError:
+                # If JSON decoding fails, return the text with the replacements we've already done
+                return text
+        except Exception as e:
+            # If any error occurs, return the original text
+            print(f"Warning: Error formatting text: {e}")
+            return text
+
+    def convert_model_name(self, model: str) -> str:
+        """
+        Convert model names to ones supported by X0GPT.
+
+        Args:
+            model: Model name to convert
+
+        Returns:
+            X0GPT model name
+        """
+        # X0GPT doesn't actually use model names, but we'll keep this for compatibility
+        return model
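
In summary, 8.2.9 switches this provider's usage accounting from whitespace splitting to the count_tokens helper, attaches usage directly to streaming chunks instead of only to their to_dict() output, and replaces the gpt-4/gpt-3.5-turbo placeholders with a single "X0GPT" model name. The following is a minimal, hypothetical sketch of driving the rewritten client; it is not part of the diff. The import path is inferred from entry 69 in the changed-files list above, and the attribute access mirrors the Choice/ChoiceDelta structures defined in the new code.

    # Hypothetical usage sketch; module path assumed from the file list above.
    from webscout.Provider.OPENAI.x0gpt import X0GPT

    client = X0GPT(timeout=30)

    # Non-streaming: usage is now derived from count_tokens rather than
    # a whitespace split of the prompt and response text.
    response = client.chat.completions.create(
        model="X0GPT",
        messages=[{"role": "user", "content": "Hello!"}],
    )
    print(response.choices[0].message.content)
    print(response.usage)

    # Streaming: each chunk now carries a .usage dict set directly on the
    # object, so callers no longer need to go through chunk.to_dict().
    for chunk in client.chat.completions.create(
        model="X0GPT",
        messages=[{"role": "user", "content": "Hello!"}],
        stream=True,
    ):
        if chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="")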