webscout 8.3.3-py3-none-any.whl → 8.3.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIutel.py +53 -800
- webscout/Bard.py +2 -22
- webscout/Provider/AISEARCH/__init__.py +11 -10
- webscout/Provider/AISEARCH/felo_search.py +7 -3
- webscout/Provider/AISEARCH/scira_search.py +26 -11
- webscout/Provider/AISEARCH/stellar_search.py +53 -8
- webscout/Provider/Deepinfra.py +81 -57
- webscout/Provider/ExaChat.py +9 -5
- webscout/Provider/Flowith.py +1 -1
- webscout/Provider/FreeGemini.py +2 -2
- webscout/Provider/Gemini.py +3 -10
- webscout/Provider/GeminiProxy.py +31 -5
- webscout/Provider/LambdaChat.py +39 -31
- webscout/Provider/Netwrck.py +5 -8
- webscout/Provider/OLLAMA.py +8 -9
- webscout/Provider/OPENAI/README.md +1 -1
- webscout/Provider/OPENAI/TogetherAI.py +57 -48
- webscout/Provider/OPENAI/TwoAI.py +94 -1
- webscout/Provider/OPENAI/__init__.py +1 -3
- webscout/Provider/OPENAI/autoproxy.py +1 -1
- webscout/Provider/OPENAI/copilot.py +73 -26
- webscout/Provider/OPENAI/deepinfra.py +60 -24
- webscout/Provider/OPENAI/exachat.py +9 -5
- webscout/Provider/OPENAI/monochat.py +3 -3
- webscout/Provider/OPENAI/netwrck.py +4 -7
- webscout/Provider/OPENAI/qodo.py +630 -0
- webscout/Provider/OPENAI/scirachat.py +86 -49
- webscout/Provider/OPENAI/textpollinations.py +19 -14
- webscout/Provider/OPENAI/venice.py +1 -0
- webscout/Provider/Perplexitylabs.py +163 -147
- webscout/Provider/Qodo.py +478 -0
- webscout/Provider/TTI/__init__.py +1 -0
- webscout/Provider/TTI/monochat.py +3 -3
- webscout/Provider/TTI/together.py +7 -6
- webscout/Provider/TTI/venice.py +368 -0
- webscout/Provider/TextPollinationsAI.py +19 -14
- webscout/Provider/TogetherAI.py +57 -44
- webscout/Provider/TwoAI.py +96 -2
- webscout/Provider/TypliAI.py +33 -27
- webscout/Provider/UNFINISHED/PERPLEXED_search.py +254 -0
- webscout/Provider/UNFINISHED/fetch_together_models.py +6 -11
- webscout/Provider/Venice.py +1 -0
- webscout/Provider/WiseCat.py +18 -20
- webscout/Provider/__init__.py +4 -10
- webscout/Provider/copilot.py +58 -61
- webscout/Provider/freeaichat.py +64 -55
- webscout/Provider/monochat.py +275 -0
- webscout/Provider/scira_chat.py +115 -21
- webscout/Provider/toolbaz.py +5 -10
- webscout/Provider/typefully.py +1 -11
- webscout/Provider/x0gpt.py +325 -315
- webscout/__init__.py +4 -11
- webscout/auth/__init__.py +19 -4
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/auth_system.py +25 -40
- webscout/auth/config.py +105 -6
- webscout/auth/database.py +377 -22
- webscout/auth/models.py +185 -130
- webscout/auth/request_processing.py +175 -11
- webscout/auth/routes.py +119 -5
- webscout/auth/server.py +9 -2
- webscout/auth/simple_logger.py +236 -0
- webscout/sanitize.py +1074 -0
- webscout/version.py +1 -1
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/METADATA +9 -150
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/RECORD +70 -72
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/HuggingFaceChat.py +0 -469
- webscout/Provider/OPENAI/README_AUTOPROXY.md +0 -238
- webscout/Provider/OPENAI/freeaichat.py +0 -363
- webscout/Provider/OPENAI/typegpt.py +0 -368
- webscout/Provider/OPENAI/uncovrAI.py +0 -477
- webscout/Provider/WritingMate.py +0 -273
- webscout/Provider/typegpt.py +0 -284
- webscout/Provider/uncovr.py +0 -333
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/WHEEL +0 -0
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/top_level.txt +0 -0
webscout/Provider/x0gpt.py
CHANGED
@@ -1,315 +1,325 @@
All 315 original lines were removed and replaced with the following 325-line file:

from typing import Generator, Optional, Union, Any, Dict
from uuid import uuid4
from curl_cffi import CurlError
from curl_cffi.requests import Session
import re

from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts, sanitize_stream  # Import sanitize_stream
from webscout.AIbase import Provider
from webscout import exceptions
from webscout.litagent import LitAgent
# Import HTTPVersion enum
from curl_cffi.const import CurlHttpVersion

class X0GPT(Provider):
    """
    A class to interact with the x0-gpt.devwtf.in API.

    Attributes:
        system_prompt (str): The system prompt to define the assistant's role.

    Examples:
        >>> from webscout.Provider.x0gpt import X0GPT
        >>> ai = X0GPT()
        >>> response = ai.chat("What's the weather today?")
        >>> print(response)
        'The weather today is sunny with a high of 75°F.'
    """
    AVAILABLE_MODELS = ["UNKNOWN"]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 600,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        system_prompt: str = "You are a helpful assistant.",
        model: str = "UNKNOWN"
    ):
        """
        Initializes the X0GPT API with given parameters.

        Args:
            is_conversation (bool): Whether the provider is in conversation mode.
            max_tokens (int): Maximum number of tokens to sample.
            timeout (int): Timeout for API requests.
            intro (str): Introduction message for the conversation.
            filepath (str): Filepath for storing conversation history.
            update_file (bool): Whether to update the conversation history file.
            proxies (dict): Proxies for the API requests.
            history_offset (int): Offset for conversation history.
            act (str): Act for the conversation.
            system_prompt (str): The system prompt to define the assistant's role.

        Examples:
            >>> ai = X0GPT(system_prompt="You are a friendly assistant.")
            >>> print(ai.system_prompt)
            'You are a friendly assistant.'
        """
        # Initialize curl_cffi Session instead of requests.Session
        self.session = Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://x0-gpt.devwtf.in/api/stream/reply"
        self.timeout = timeout
        self.last_response = {}
        self.system_prompt = system_prompt

        # Initialize LitAgent for user agent generation
        self.agent = LitAgent()

        self.headers = {
            "authority": "x0-gpt.devwtf.in",
            "method": "POST",
            "path": "/api/stream/reply",
            "scheme": "https",
            "accept": "*/*",
            "accept-encoding": "gzip, deflate, br, zstd",  # Keep zstd for now
            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
            # "content-length": "114", # Let curl_cffi handle content-length
            "content-type": "application/json",
            "dnt": "1",
            "origin": "https://x0-gpt.devwtf.in",
            # "priority": "u=1, i", # Remove priority header
            "referer": "https://x0-gpt.devwtf.in/chat",
            "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "user-agent": self.agent.random()
        }

        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        # Update curl_cffi session headers and proxies
        self.session.headers.update(self.headers)
        self.session.proxies = proxies

        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[Dict[str, Any], Generator]:
        """
        Sends a prompt to the x0-gpt.devwtf.in API and returns the response.

        Args:
            prompt (str): The prompt to send to the API.
            stream (bool): Whether to stream the response.
            raw (bool): Whether to return the raw response.
            optimizer (str): Optimizer to use for the prompt.
            conversationally (bool): Whether to generate the prompt conversationally.

        Returns:
            Dict[str, Any]: The API response.

        Examples:
            >>> ai = X0GPT()
            >>> response = ai.ask("Tell me a joke!")
            >>> print(response)
            {'text': 'Why did the scarecrow win an award? Because he was outstanding in his field!'}
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        payload = {
            "messages": [
                {"role": "system", "content": self.system_prompt},
                {"role": "user", "content": conversation_prompt}
            ],
            "chatId": uuid4().hex,
            "namespace": None
        }

        def for_stream():
            try:
                # Use curl_cffi session post with updated impersonate and http_version
                response = self.session.post(
                    self.api_endpoint,
                    headers=self.headers,
                    json=payload,
                    stream=True,
                    timeout=self.timeout,
                    impersonate="chrome120",  # Try a different impersonation profile
                    http_version=CurlHttpVersion.V1_1  # Force HTTP/1.1
                )
                if not response.ok:
                    raise exceptions.FailedToGenerateResponseError(
                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                    )

                streaming_response = ""
                # Use sanitize_stream with regex-based extraction and filtering
                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=None),  # Pass byte iterator
                    intro_value=None,  # No simple prefix to remove here
                    to_json=False,  # Content is not JSON
                    # Use regex to extract content from x0gpt format '0:"..."'
                    extract_regexes=[r'0:"(.*?)"'],
                    # Skip empty chunks, connection status messages, and control characters
                    skip_regexes=[
                        r'^\s*$',  # Empty lines
                        r'data:\s*\[DONE\]',  # Stream end markers
                        r'event:\s*',  # SSE event headers
                        r'^\d+:\s*$',  # Standalone numbers
                        r'^:\s*$',  # Colon-only lines
                        r'^\s*[\x00-\x1f]+\s*$'  # Control characters
                    ],
                    raw=raw
                )

                for content_chunk in processed_stream:
                    # Always yield as string, even in raw mode
                    if isinstance(content_chunk, bytes):
                        content_chunk = content_chunk.decode('utf-8', errors='ignore')

                    if raw:
                        yield content_chunk
                    else:
                        if content_chunk and isinstance(content_chunk, str):
                            # Handle unicode escapes and clean up the content
                            try:
                                # Decode unicode escapes like \u00e9
                                clean_content = content_chunk.encode().decode('unicode_escape')
                                # Handle escaped backslashes and quotes
                                clean_content = clean_content.replace('\\\\', '\\').replace('\\"', '"')
                                streaming_response += clean_content
                                yield dict(text=clean_content)
                            except (UnicodeDecodeError, UnicodeEncodeError):
                                # Fallback to original content if unicode processing fails
                                streaming_response += content_chunk
                                yield dict(text=content_chunk)

                self.last_response.update(dict(text=streaming_response))
                self.conversation.update_chat_history(
                    prompt, self.get_message(self.last_response)
                )
            except CurlError as e:  # Catch CurlError
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
            except Exception as e:  # Catch other potential exceptions
                # Include the original exception type in the message for clarity
                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")

        def for_non_stream():
            # This function implicitly uses the updated for_stream
            if stream:
                return for_stream()
            for _ in for_stream():
                pass
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        raw: bool = False,  # Added raw parameter
    ) -> Union[str, Generator[str, None, None]]:
        """
        Generates a response from the X0GPT API.

        Args:
            prompt (str): The prompt to send to the API.
            stream (bool): Whether to stream the response.
            optimizer (str): Optimizer to use for the prompt.
            conversationally (bool): Whether to generate the prompt conversationally.
            raw (bool): Whether to return raw response chunks.

        Returns:
            str: The API response.

        Examples:
            >>> ai = X0GPT()
            >>> response = ai.chat("What's the weather today?")
            >>> print(response)
            'The weather today is sunny with a high of 75°F.'
        """

        def for_stream():
            for response in self.ask(
                prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally
            ):
                if raw:
                    yield response
                else:
                    yield self.get_message(response)

        def for_non_stream():
            result = self.ask(
                prompt,
                False,
                raw=raw,
                optimizer=optimizer,
                conversationally=conversationally,
            )
            if raw:
                return result
            else:
                return self.get_message(result)

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """
        Extracts the message from the API response.

        Args:
            response (dict): The API response.

        Returns:
            str: The message content.

        Examples:
            >>> ai = X0GPT()
            >>> response = ai.ask("Tell me a joke!")
            >>> message = ai.get_message(response)
            >>> print(message)
            'Why did the scarecrow win an award? Because he was outstanding in his field!'
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        # Ensure text exists before processing
        text = response.get("text", "")
        # Text is now cleaned by the regex-based sanitize_stream processing
        return text

if __name__ == "__main__":
    from rich import print
    ai = X0GPT(timeout=5000)
    response = ai.chat("write a poem about AI", stream=True, raw=False)
    for chunk in response:
        print(chunk, end="", flush=True)