webscout 8.2.8__py3-none-any.whl → 8.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic. Click here for more details.
- webscout/AIauto.py +34 -16
- webscout/AIbase.py +96 -37
- webscout/AIutel.py +491 -87
- webscout/Bard.py +441 -323
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Litlogger/README.md +10 -0
- webscout/Litlogger/__init__.py +7 -59
- webscout/Litlogger/formats.py +4 -0
- webscout/Litlogger/handlers.py +103 -0
- webscout/Litlogger/levels.py +13 -0
- webscout/Litlogger/logger.py +92 -0
- webscout/Provider/AISEARCH/Perplexity.py +332 -358
- webscout/Provider/AISEARCH/felo_search.py +9 -35
- webscout/Provider/AISEARCH/genspark_search.py +30 -56
- webscout/Provider/AISEARCH/hika_search.py +4 -16
- webscout/Provider/AISEARCH/iask_search.py +410 -436
- webscout/Provider/AISEARCH/monica_search.py +4 -30
- webscout/Provider/AISEARCH/scira_search.py +6 -32
- webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
- webscout/Provider/Blackboxai.py +155 -35
- webscout/Provider/ChatSandbox.py +2 -1
- webscout/Provider/Deepinfra.py +339 -339
- webscout/Provider/ExaChat.py +358 -358
- webscout/Provider/Gemini.py +169 -169
- webscout/Provider/GithubChat.py +1 -2
- webscout/Provider/Glider.py +3 -3
- webscout/Provider/HeckAI.py +172 -82
- webscout/Provider/LambdaChat.py +1 -0
- webscout/Provider/MCPCore.py +7 -3
- webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
- webscout/Provider/OPENAI/Cloudflare.py +38 -21
- webscout/Provider/OPENAI/FalconH1.py +457 -0
- webscout/Provider/OPENAI/FreeGemini.py +35 -18
- webscout/Provider/OPENAI/NEMOTRON.py +34 -34
- webscout/Provider/OPENAI/PI.py +427 -0
- webscout/Provider/OPENAI/Qwen3.py +304 -0
- webscout/Provider/OPENAI/README.md +952 -1253
- webscout/Provider/OPENAI/TwoAI.py +374 -0
- webscout/Provider/OPENAI/__init__.py +7 -1
- webscout/Provider/OPENAI/ai4chat.py +73 -63
- webscout/Provider/OPENAI/api.py +869 -644
- webscout/Provider/OPENAI/base.py +2 -0
- webscout/Provider/OPENAI/c4ai.py +34 -13
- webscout/Provider/OPENAI/chatgpt.py +575 -556
- webscout/Provider/OPENAI/chatgptclone.py +512 -487
- webscout/Provider/OPENAI/chatsandbox.py +11 -6
- webscout/Provider/OPENAI/copilot.py +258 -0
- webscout/Provider/OPENAI/deepinfra.py +327 -318
- webscout/Provider/OPENAI/e2b.py +140 -104
- webscout/Provider/OPENAI/exaai.py +420 -411
- webscout/Provider/OPENAI/exachat.py +448 -443
- webscout/Provider/OPENAI/flowith.py +7 -3
- webscout/Provider/OPENAI/freeaichat.py +12 -8
- webscout/Provider/OPENAI/glider.py +15 -8
- webscout/Provider/OPENAI/groq.py +5 -2
- webscout/Provider/OPENAI/heckai.py +311 -307
- webscout/Provider/OPENAI/llmchatco.py +9 -7
- webscout/Provider/OPENAI/mcpcore.py +18 -9
- webscout/Provider/OPENAI/multichat.py +7 -5
- webscout/Provider/OPENAI/netwrck.py +16 -11
- webscout/Provider/OPENAI/oivscode.py +290 -0
- webscout/Provider/OPENAI/opkfc.py +507 -496
- webscout/Provider/OPENAI/pydantic_imports.py +172 -0
- webscout/Provider/OPENAI/scirachat.py +29 -17
- webscout/Provider/OPENAI/sonus.py +308 -303
- webscout/Provider/OPENAI/standardinput.py +442 -433
- webscout/Provider/OPENAI/textpollinations.py +18 -11
- webscout/Provider/OPENAI/toolbaz.py +419 -413
- webscout/Provider/OPENAI/typefully.py +17 -10
- webscout/Provider/OPENAI/typegpt.py +21 -11
- webscout/Provider/OPENAI/uncovrAI.py +477 -462
- webscout/Provider/OPENAI/utils.py +90 -79
- webscout/Provider/OPENAI/venice.py +435 -425
- webscout/Provider/OPENAI/wisecat.py +387 -381
- webscout/Provider/OPENAI/writecream.py +166 -163
- webscout/Provider/OPENAI/x0gpt.py +26 -37
- webscout/Provider/OPENAI/yep.py +384 -356
- webscout/Provider/PI.py +2 -1
- webscout/Provider/TTI/README.md +55 -101
- webscout/Provider/TTI/__init__.py +4 -9
- webscout/Provider/TTI/aiarta.py +365 -0
- webscout/Provider/TTI/artbit.py +0 -0
- webscout/Provider/TTI/base.py +64 -0
- webscout/Provider/TTI/fastflux.py +200 -0
- webscout/Provider/TTI/magicstudio.py +201 -0
- webscout/Provider/TTI/piclumen.py +203 -0
- webscout/Provider/TTI/pixelmuse.py +225 -0
- webscout/Provider/TTI/pollinations.py +221 -0
- webscout/Provider/TTI/utils.py +11 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/base.py +159 -159
- webscout/Provider/TTS/openai_fm.py +129 -0
- webscout/Provider/TextPollinationsAI.py +308 -308
- webscout/Provider/TwoAI.py +239 -44
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/puterjs.py +635 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Writecream.py +246 -246
- webscout/Provider/__init__.py +2 -2
- webscout/Provider/ai4chat.py +33 -8
- webscout/Provider/granite.py +41 -6
- webscout/Provider/koala.py +169 -169
- webscout/Provider/oivscode.py +309 -0
- webscout/Provider/samurai.py +3 -2
- webscout/Provider/scnet.py +1 -0
- webscout/Provider/typegpt.py +3 -3
- webscout/Provider/uncovr.py +368 -368
- webscout/client.py +70 -0
- webscout/litprinter/__init__.py +58 -58
- webscout/optimizers.py +419 -419
- webscout/scout/README.md +3 -1
- webscout/scout/core/crawler.py +134 -64
- webscout/scout/core/scout.py +148 -109
- webscout/scout/element.py +106 -88
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/plugins/manager.py +9 -2
- webscout/version.py +1 -1
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
- webscout-8.3.dist-info/RECORD +290 -0
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
- webscout/Litlogger/Readme.md +0 -175
- webscout/Litlogger/core/__init__.py +0 -6
- webscout/Litlogger/core/level.py +0 -23
- webscout/Litlogger/core/logger.py +0 -165
- webscout/Litlogger/handlers/__init__.py +0 -12
- webscout/Litlogger/handlers/console.py +0 -33
- webscout/Litlogger/handlers/file.py +0 -143
- webscout/Litlogger/handlers/network.py +0 -173
- webscout/Litlogger/styles/__init__.py +0 -7
- webscout/Litlogger/styles/colors.py +0 -249
- webscout/Litlogger/styles/formats.py +0 -458
- webscout/Litlogger/styles/text.py +0 -87
- webscout/Litlogger/utils/__init__.py +0 -6
- webscout/Litlogger/utils/detectors.py +0 -153
- webscout/Litlogger/utils/formatters.py +0 -200
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/TTI/AiForce/README.md +0 -159
- webscout/Provider/TTI/AiForce/__init__.py +0 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
- webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
- webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
- webscout/Provider/TTI/ImgSys/README.md +0 -174
- webscout/Provider/TTI/ImgSys/__init__.py +0 -23
- webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
- webscout/Provider/TTI/MagicStudio/README.md +0 -101
- webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
- webscout/Provider/TTI/Nexra/README.md +0 -155
- webscout/Provider/TTI/Nexra/__init__.py +0 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
- webscout/Provider/TTI/PollinationsAI/README.md +0 -146
- webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
- webscout/Provider/TTI/aiarta/README.md +0 -134
- webscout/Provider/TTI/aiarta/__init__.py +0 -2
- webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
- webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
- webscout/Provider/TTI/artbit/README.md +0 -100
- webscout/Provider/TTI/artbit/__init__.py +0 -22
- webscout/Provider/TTI/artbit/async_artbit.py +0 -155
- webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
- webscout/Provider/TTI/fastflux/README.md +0 -129
- webscout/Provider/TTI/fastflux/__init__.py +0 -22
- webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
- webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
- webscout/Provider/TTI/huggingface/README.md +0 -114
- webscout/Provider/TTI/huggingface/__init__.py +0 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
- webscout/Provider/TTI/piclumen/README.md +0 -161
- webscout/Provider/TTI/piclumen/__init__.py +0 -23
- webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
- webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
- webscout/Provider/TTI/pixelmuse/README.md +0 -79
- webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
- webscout/Provider/TTI/talkai/README.md +0 -139
- webscout/Provider/TTI/talkai/__init__.py +0 -4
- webscout/Provider/TTI/talkai/async_talkai.py +0 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
- webscout/Provider/UNFINISHED/oivscode.py +0 -351
- webscout-8.2.8.dist-info/RECORD +0 -334
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
|
@@ -1,359 +1,333 @@
|
|
|
1
|
-
import json
|
|
2
|
-
import random
|
|
3
|
-
from uuid import uuid4
|
|
4
|
-
from typing import Dict, Optional, Generator, Union, Any
|
|
5
|
-
from curl_cffi import requests
|
|
6
|
-
|
|
7
|
-
from webscout.AIbase import AISearch
|
|
8
|
-
from webscout import exceptions
|
|
9
|
-
from webscout.litagent import LitAgent
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
class
|
|
13
|
-
"""A
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
>>>
|
|
23
|
-
>>>
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
'
|
|
103
|
-
'
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
#
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
|
|
170
|
-
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
'
|
|
242
|
-
'
|
|
243
|
-
'
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
250
|
-
|
|
251
|
-
|
|
252
|
-
|
|
253
|
-
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
|
|
257
|
-
|
|
258
|
-
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
'
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
|
|
323
|
-
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
|
|
331
|
-
|
|
332
|
-
|
|
333
|
-
# Process the final response to extract the answer
|
|
334
|
-
if final_response:
|
|
335
|
-
answer_text = self._extract_answer(final_response)
|
|
336
|
-
return Response(answer_text) if not raw else final_response
|
|
337
|
-
elif chunks:
|
|
338
|
-
answer_text = self._extract_answer(chunks[-1])
|
|
339
|
-
return Response(answer_text) if not raw else chunks[-1]
|
|
340
|
-
else:
|
|
341
|
-
return Response("") if not raw else {}
|
|
342
|
-
|
|
343
|
-
# If we get here, something went wrong
|
|
344
|
-
raise exceptions.FailedToGenerateResponseError("Failed to get complete response")
|
|
345
|
-
|
|
346
|
-
except requests.RequestsError as e:
|
|
347
|
-
raise exceptions.APIConnectionError(f"Connection error: {str(e)}")
|
|
348
|
-
except json.JSONDecodeError:
|
|
349
|
-
raise exceptions.FailedToGenerateResponseError("Failed to parse response JSON")
|
|
350
|
-
except Exception as e:
|
|
351
|
-
raise exceptions.FailedToGenerateResponseError(f"Error: {str(e)}")
|
|
352
|
-
|
|
353
|
-
|
|
354
|
-
if __name__ == "__main__":
|
|
355
|
-
# Simple test
|
|
356
|
-
ai = Perplexity()
|
|
357
|
-
response = ai.search("What is Python?")
|
|
358
|
-
print(response)
|
|
1
|
+
import json
|
|
2
|
+
import random
|
|
3
|
+
from uuid import uuid4
|
|
4
|
+
from typing import Dict, Optional, Generator, Union, Any
|
|
5
|
+
from curl_cffi import requests
|
|
6
|
+
|
|
7
|
+
from webscout.AIbase import AISearch, SearchResponse
|
|
8
|
+
from webscout import exceptions
|
|
9
|
+
from webscout.litagent import LitAgent
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class Perplexity(AISearch):
    """A class to interact with the Perplexity AI search API.

    Perplexity provides a powerful search interface that returns AI-generated responses
    based on web content. It supports both streaming and non-streaming responses,
    multiple search modes, and model selection.

    Basic Usage:
        >>> from webscout import Perplexity
        >>> ai = Perplexity()
        >>> # Non-streaming example
        >>> response = ai.search("What is Python?")
        >>> print(response)
        Python is a high-level programming language...

        >>> # Streaming example
        >>> for chunk in ai.search("Tell me about AI", stream=True):
        ...     print(chunk, end="", flush=True)
        Artificial Intelligence is...

        >>> # Pro search with specific model (requires authentication via cookies)
        >>> cookies = {"perplexity-user": "your_cookie_value"}
        >>> ai_pro = Perplexity(cookies=cookies)
        >>> response = ai_pro.search("Latest AI research", mode="pro", model="gpt-4o")
        >>> print(response)

        >>> # Raw response format
        >>> for chunk in ai.search("Hello", stream=True, raw=True):
        ...     print(chunk)
        {'text': 'Hello'}
        {'text': ' there!'}

    Args:
        cookies (dict, optional): Cookies to use for authentication. Defaults to None.
        timeout (int, optional): Request timeout in seconds. Defaults to 60.
        proxies (dict, optional): Proxy configuration for requests. Defaults to None.
    """

    def __init__(
        self,
        cookies: Optional[Dict[str, str]] = None,
        timeout: int = 60,
        proxies: Optional[Dict[str, str]] = None
    ):
        """
        Initialize the Perplexity client.

        Performs a live handshake with perplexity.ai at construction time:
        obtains a socket.io polling session id and announces an anonymous
        JWT, so network access is required to instantiate this class.

        Args:
            cookies (dict, optional): Cookies to use for authentication. Defaults to None.
            timeout (int, optional): Request timeout in seconds. Defaults to 60.
            proxies (dict, optional): Proxy configuration for requests. Defaults to None.

        Raises:
            AssertionError: If the socket.io connect handshake is not acknowledged.
        """
        self.timeout = timeout
        self.agent = LitAgent()
        # Browser-like headers; curl_cffi's impersonate='chrome' additionally
        # mimics Chrome's TLS fingerprint so requests are not blocked.
        self.session = requests.Session(headers={
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'accept-language': 'en-US,en;q=0.9',
            'cache-control': 'max-age=0',
            'dnt': '1',
            'priority': 'u=0, i',
            'sec-ch-ua': '"Not;A=Brand";v="24", "Chromium";v="128"',
            'sec-ch-ua-arch': '"x86"',
            'sec-ch-ua-bitness': '"64"',
            'sec-ch-ua-full-version': '"128.0.6613.120"',
            'sec-ch-ua-full-version-list': '"Not;A=Brand";v="24.0.0.0", "Chromium";v="128.0.6613.120"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-model': '""',
            'sec-ch-ua-platform': '"Windows"',
            'sec-ch-ua-platform-version': '"19.0.0"',
            'sec-fetch-dest': 'document',
            'sec-fetch-mode': 'navigate',
            'sec-fetch-site': 'same-origin',
            'sec-fetch-user': '?1',
            'upgrade-insecure-requests': '1',
            'user-agent': self.agent.random(),
        }, cookies=cookies or {}, impersonate='chrome')

        # Apply proxies if provided
        if proxies:
            self.session.proxies.update(proxies)

        # Initialize session with socket.io: random 8-hex-digit cache-buster
        # used as the polling transport's `t` query parameter.
        self.timestamp = format(random.getrandbits(32), '08x')

        # Get socket.io session ID.
        # response.text[1:] skips the leading engine.io packet-type digit
        # before the JSON payload — presumably the '0' (open) packet; confirm
        # against the engine.io polling protocol if this breaks.
        response = self.session.get(f'https://www.perplexity.ai/socket.io/?EIO=4&transport=polling&t={self.timestamp}')
        self.sid = json.loads(response.text[1:])['sid']

        # Initialize socket.io connection ('40' = socket.io CONNECT packet
        # carrying an anonymous JWT payload).
        # NOTE(review): using `assert` for this runtime check is stripped
        # under `python -O`; consider raising an explicit exception instead.
        assert (self.session.post(
            f'https://www.perplexity.ai/socket.io/?EIO=4&transport=polling&t={self.timestamp}&sid={self.sid}',
            data='40{"jwt":"anonymous-ask-user"}'
        )).text == 'OK'

        # Get session info (response intentionally ignored; this primes
        # auth-related cookies on the session).
        self.session.get('https://www.perplexity.ai/api/auth/session')

        # Set default quotas: anonymous users get 0 copilot/file-upload uses;
        # authenticated (cookie-bearing) users are treated as unlimited.
        self.copilot = 0 if not cookies else float('inf')
        self.file_upload = 0 if not cookies else float('inf')

    def _extract_answer(self, response):
        """
        Extract the answer from the response.

        Looks for the step with step_type 'FINAL' in the response's 'text'
        list and parses its JSON-encoded 'answer' payload.

        Args:
            response (dict): The response from Perplexity AI.

        Returns:
            str: The extracted answer text, or "" if no final answer is present
                or the payload cannot be parsed.
        """
        if not response:
            return ""

        # Find the FINAL step in the text array
        final_step = None
        if 'text' in response and isinstance(response['text'], list):
            for step in response['text']:
                if step.get('step_type') == 'FINAL' and 'content' in step and 'answer' in step['content']:
                    final_step = step
                    break

        if not final_step:
            return ""

        try:
            # Parse the answer JSON string (the 'answer' field is itself a
            # JSON document whose 'answer' key holds the display text).
            answer_json = json.loads(final_step['content']['answer'])
            return answer_json.get('answer', '')
        except (json.JSONDecodeError, KeyError):
            return ""

    def search(
        self,
        prompt: str,
        mode: str = 'auto',
        model: Optional[str] = None,
        sources: Optional[list] = None,
        stream: bool = False,
        raw: bool = False,
        language: str = 'en-US',
        follow_up: Optional[Dict[str, Any]] = None,
        incognito: bool = False
    ) -> Union[SearchResponse, Generator[Union[Dict[str, str], SearchResponse], None, None]]:
        """Search using the Perplexity API and get AI-generated responses.

        This method sends a search query to Perplexity and returns the AI-generated response.
        It supports both streaming and non-streaming modes, as well as raw response format.

        Args:
            prompt (str): The search query or prompt to send to the API.
            mode (str, optional): Search mode. Options: 'auto', 'pro', 'reasoning', 'deep research'.
                Defaults to 'auto'.
            model (str, optional): Model to use. Available models depend on the mode. Defaults to None.
            sources (list, optional): Sources to use. Options: 'web', 'scholar', 'social'.
                Defaults to ['web'].
            stream (bool, optional): If True, yields response chunks as they arrive.
                If False, returns complete response. Defaults to False.
            raw (bool, optional): If True, returns raw response dictionaries.
                If False, returns Response objects that convert to text automatically.
                Defaults to False.
            language (str, optional): Language to use. Defaults to 'en-US'.
            follow_up (dict, optional): Follow-up information. Defaults to None.
            incognito (bool, optional): Whether to use incognito mode. Defaults to False.

        Returns:
            If stream=True: Generator yielding response chunks as they arrive
            If stream=False: Complete response

        Raises:
            ValueError: If invalid mode or model is provided
            exceptions.APIConnectionError: If connection to API fails
            exceptions.FailedToGenerateResponseError: If response generation fails
        """
        # Avoid a mutable default argument for `sources`.
        if sources is None:
            sources = ['web']

        # Validate inputs
        if mode not in ['auto', 'pro', 'reasoning', 'deep research']:
            raise ValueError('Search modes -> ["auto", "pro", "reasoning", "deep research"]')

        if not all([source in ('web', 'scholar', 'social') for source in sources]):
            raise ValueError('Sources -> ["web", "scholar", "social"]')

        # Check if model is valid for the selected mode (None is always
        # accepted and falls through to the mode's default preference).
        valid_models = {
            'auto': [None],
            'pro': [None, 'sonar', 'gpt-4.5', 'gpt-4o', 'claude 3.7 sonnet', 'gemini 2.0 flash', 'grok-2'],
            'reasoning': [None, 'r1', 'o3-mini', 'claude 3.7 sonnet'],
            'deep research': [None]
        }

        if mode in valid_models and model not in valid_models[mode] and model is not None:
            raise ValueError(f'Invalid model for {mode} mode. Valid models: {valid_models[mode]}')

        # Prepare request data. 'model_preference' maps the public model
        # name to Perplexity's internal backend identifier per mode.
        json_data = {
            'query_str': prompt,
            'params': {
                'attachments': follow_up['attachments'] if follow_up else [],
                'frontend_context_uuid': str(uuid4()),
                'frontend_uuid': str(uuid4()),
                'is_incognito': incognito,
                'language': language,
                'last_backend_uuid': follow_up['backend_uuid'] if follow_up else None,
                'mode': 'concise' if mode == 'auto' else 'copilot',
                'model_preference': {
                    'auto': {
                        None: 'turbo'
                    },
                    'pro': {
                        None: 'pplx_pro',
                        'sonar': 'experimental',
                        'gpt-4.5': 'gpt45',
                        'gpt-4o': 'gpt4o',
                        'claude 3.7 sonnet': 'claude2',
                        'gemini 2.0 flash': 'gemini2flash',
                        'grok-2': 'grok'
                    },
                    'reasoning': {
                        None: 'pplx_reasoning',
                        'r1': 'r1',
                        'o3-mini': 'o3mini',
                        'claude 3.7 sonnet': 'claude37sonnetthinking'
                    },
                    'deep research': {
                        None: 'pplx_alpha'
                    }
                }[mode][model],
                'source': 'default',
                'sources': sources,
                'version': '2.18'
            }
        }

        try:
            # Make the request (server-sent events endpoint; always streamed
            # at the HTTP level, even for non-streaming callers).
            resp = self.session.post(
                'https://www.perplexity.ai/rest/sse/perplexity_ask',
                json=json_data,
                stream=True,
                timeout=self.timeout
            )

            if resp.status_code != 200:
                raise exceptions.APIConnectionError(f"API returned status code {resp.status_code}")

            # Define streaming response handler. SSE events are separated by
            # blank lines, hence the b'\r\n\r\n' delimiter.
            def stream_response():
                for chunk in resp.iter_lines(delimiter=b'\r\n\r\n'):
                    content = chunk.decode('utf-8')
                    if content.startswith('event: message\r\n'):
                        content_json = json.loads(content[len('event: message\r\ndata: '):])
                        if 'text' in content_json:
                            try:
                                # If text is a string, try to parse it as JSON
                                if isinstance(content_json['text'], str):
                                    content_json['text'] = json.loads(content_json['text'])
                            except json.JSONDecodeError:
                                pass

                        if raw:
                            yield content_json
                        else:
                            # For non-raw responses, extract text from each chunk
                            if 'text' in content_json and isinstance(content_json['text'], list):
                                for step in content_json['text']:
                                    if step.get('type') == 'answer' and 'value' in step:
                                        yield SearchResponse(step['value'])
                                    elif step.get('type') == 'thinking' and 'value' in step:
                                        yield SearchResponse(step['value'])
                    elif content.startswith('event: end_of_stream\r\n'):
                        return

            # Handle streaming or non-streaming response
            if stream:
                return stream_response()
            else:
                # Non-streaming: accumulate every message event, then extract
                # the final answer once the end_of_stream event arrives.
                chunks = []
                final_response = None

                for chunk in resp.iter_lines(delimiter=b'\r\n\r\n'):
                    content = chunk.decode('utf-8')
                    if content.startswith('event: message\r\n'):
                        content_json = json.loads(content[len('event: message\r\ndata: '):])
                        if 'text' in content_json:
                            try:
                                # If text is a string, try to parse it as JSON
                                if isinstance(content_json['text'], str):
                                    content_json['text'] = json.loads(content_json['text'])
                            except json.JSONDecodeError:
                                pass
                        chunks.append(content_json)
                        final_response = content_json
                    elif content.startswith('event: end_of_stream\r\n'):
                        # Process the final response to extract the answer
                        if final_response:
                            answer_text = self._extract_answer(final_response)
                            return SearchResponse(answer_text) if not raw else final_response
                        elif chunks:
                            answer_text = self._extract_answer(chunks[-1])
                            return SearchResponse(answer_text) if not raw else chunks[-1]
                        else:
                            return SearchResponse("") if not raw else {}

                # If we get here, something went wrong (the SSE stream ended
                # without an end_of_stream event).
                raise exceptions.FailedToGenerateResponseError("Failed to get complete response")

        except requests.RequestsError as e:
            raise exceptions.APIConnectionError(f"Connection error: {str(e)}")
        except json.JSONDecodeError:
            raise exceptions.FailedToGenerateResponseError("Failed to parse response JSON")
        except Exception as e:
            raise exceptions.FailedToGenerateResponseError(f"Error: {str(e)}")
|
|
326
|
+
|
|
327
|
+
|
|
328
|
+
if __name__ == "__main__":
    # Manual smoke test: issue one query and print the AI-generated answer.
    client = Perplexity()
    answer = client.search("What is Python?")
    print(answer)
|
|
359
333
|
|