webscout 8.2.8__py3-none-any.whl → 8.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (197) hide show
  1. webscout/AIauto.py +34 -16
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +155 -35
  22. webscout/Provider/ChatSandbox.py +2 -1
  23. webscout/Provider/Deepinfra.py +339 -339
  24. webscout/Provider/ExaChat.py +358 -358
  25. webscout/Provider/Gemini.py +169 -169
  26. webscout/Provider/GithubChat.py +1 -2
  27. webscout/Provider/Glider.py +3 -3
  28. webscout/Provider/HeckAI.py +172 -82
  29. webscout/Provider/LambdaChat.py +1 -0
  30. webscout/Provider/MCPCore.py +7 -3
  31. webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
  32. webscout/Provider/OPENAI/Cloudflare.py +38 -21
  33. webscout/Provider/OPENAI/FalconH1.py +457 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +35 -18
  35. webscout/Provider/OPENAI/NEMOTRON.py +34 -34
  36. webscout/Provider/OPENAI/PI.py +427 -0
  37. webscout/Provider/OPENAI/Qwen3.py +304 -0
  38. webscout/Provider/OPENAI/README.md +952 -1253
  39. webscout/Provider/OPENAI/TwoAI.py +374 -0
  40. webscout/Provider/OPENAI/__init__.py +7 -1
  41. webscout/Provider/OPENAI/ai4chat.py +73 -63
  42. webscout/Provider/OPENAI/api.py +869 -644
  43. webscout/Provider/OPENAI/base.py +2 -0
  44. webscout/Provider/OPENAI/c4ai.py +34 -13
  45. webscout/Provider/OPENAI/chatgpt.py +575 -556
  46. webscout/Provider/OPENAI/chatgptclone.py +512 -487
  47. webscout/Provider/OPENAI/chatsandbox.py +11 -6
  48. webscout/Provider/OPENAI/copilot.py +258 -0
  49. webscout/Provider/OPENAI/deepinfra.py +327 -318
  50. webscout/Provider/OPENAI/e2b.py +140 -104
  51. webscout/Provider/OPENAI/exaai.py +420 -411
  52. webscout/Provider/OPENAI/exachat.py +448 -443
  53. webscout/Provider/OPENAI/flowith.py +7 -3
  54. webscout/Provider/OPENAI/freeaichat.py +12 -8
  55. webscout/Provider/OPENAI/glider.py +15 -8
  56. webscout/Provider/OPENAI/groq.py +5 -2
  57. webscout/Provider/OPENAI/heckai.py +311 -307
  58. webscout/Provider/OPENAI/llmchatco.py +9 -7
  59. webscout/Provider/OPENAI/mcpcore.py +18 -9
  60. webscout/Provider/OPENAI/multichat.py +7 -5
  61. webscout/Provider/OPENAI/netwrck.py +16 -11
  62. webscout/Provider/OPENAI/oivscode.py +290 -0
  63. webscout/Provider/OPENAI/opkfc.py +507 -496
  64. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  65. webscout/Provider/OPENAI/scirachat.py +29 -17
  66. webscout/Provider/OPENAI/sonus.py +308 -303
  67. webscout/Provider/OPENAI/standardinput.py +442 -433
  68. webscout/Provider/OPENAI/textpollinations.py +18 -11
  69. webscout/Provider/OPENAI/toolbaz.py +419 -413
  70. webscout/Provider/OPENAI/typefully.py +17 -10
  71. webscout/Provider/OPENAI/typegpt.py +21 -11
  72. webscout/Provider/OPENAI/uncovrAI.py +477 -462
  73. webscout/Provider/OPENAI/utils.py +90 -79
  74. webscout/Provider/OPENAI/venice.py +435 -425
  75. webscout/Provider/OPENAI/wisecat.py +387 -381
  76. webscout/Provider/OPENAI/writecream.py +166 -163
  77. webscout/Provider/OPENAI/x0gpt.py +26 -37
  78. webscout/Provider/OPENAI/yep.py +384 -356
  79. webscout/Provider/PI.py +2 -1
  80. webscout/Provider/TTI/README.md +55 -101
  81. webscout/Provider/TTI/__init__.py +4 -9
  82. webscout/Provider/TTI/aiarta.py +365 -0
  83. webscout/Provider/TTI/artbit.py +0 -0
  84. webscout/Provider/TTI/base.py +64 -0
  85. webscout/Provider/TTI/fastflux.py +200 -0
  86. webscout/Provider/TTI/magicstudio.py +201 -0
  87. webscout/Provider/TTI/piclumen.py +203 -0
  88. webscout/Provider/TTI/pixelmuse.py +225 -0
  89. webscout/Provider/TTI/pollinations.py +221 -0
  90. webscout/Provider/TTI/utils.py +11 -0
  91. webscout/Provider/TTS/__init__.py +2 -1
  92. webscout/Provider/TTS/base.py +159 -159
  93. webscout/Provider/TTS/openai_fm.py +129 -0
  94. webscout/Provider/TextPollinationsAI.py +308 -308
  95. webscout/Provider/TwoAI.py +239 -44
  96. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  97. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  98. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  99. webscout/Provider/Writecream.py +246 -246
  100. webscout/Provider/__init__.py +2 -2
  101. webscout/Provider/ai4chat.py +33 -8
  102. webscout/Provider/granite.py +41 -6
  103. webscout/Provider/koala.py +169 -169
  104. webscout/Provider/oivscode.py +309 -0
  105. webscout/Provider/samurai.py +3 -2
  106. webscout/Provider/scnet.py +1 -0
  107. webscout/Provider/typegpt.py +3 -3
  108. webscout/Provider/uncovr.py +368 -368
  109. webscout/client.py +70 -0
  110. webscout/litprinter/__init__.py +58 -58
  111. webscout/optimizers.py +419 -419
  112. webscout/scout/README.md +3 -1
  113. webscout/scout/core/crawler.py +134 -64
  114. webscout/scout/core/scout.py +148 -109
  115. webscout/scout/element.py +106 -88
  116. webscout/swiftcli/Readme.md +323 -323
  117. webscout/swiftcli/plugins/manager.py +9 -2
  118. webscout/version.py +1 -1
  119. webscout/zeroart/__init__.py +134 -134
  120. webscout/zeroart/effects.py +100 -100
  121. webscout/zeroart/fonts.py +1238 -1238
  122. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
  123. webscout-8.3.dist-info/RECORD +290 -0
  124. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  125. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
  126. webscout/Litlogger/Readme.md +0 -175
  127. webscout/Litlogger/core/__init__.py +0 -6
  128. webscout/Litlogger/core/level.py +0 -23
  129. webscout/Litlogger/core/logger.py +0 -165
  130. webscout/Litlogger/handlers/__init__.py +0 -12
  131. webscout/Litlogger/handlers/console.py +0 -33
  132. webscout/Litlogger/handlers/file.py +0 -143
  133. webscout/Litlogger/handlers/network.py +0 -173
  134. webscout/Litlogger/styles/__init__.py +0 -7
  135. webscout/Litlogger/styles/colors.py +0 -249
  136. webscout/Litlogger/styles/formats.py +0 -458
  137. webscout/Litlogger/styles/text.py +0 -87
  138. webscout/Litlogger/utils/__init__.py +0 -6
  139. webscout/Litlogger/utils/detectors.py +0 -153
  140. webscout/Litlogger/utils/formatters.py +0 -200
  141. webscout/Provider/ChatGPTGratis.py +0 -194
  142. webscout/Provider/TTI/AiForce/README.md +0 -159
  143. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  144. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  145. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  146. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  147. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  148. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  149. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  150. webscout/Provider/TTI/ImgSys/README.md +0 -174
  151. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  152. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  153. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  154. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  155. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  156. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  157. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  158. webscout/Provider/TTI/Nexra/README.md +0 -155
  159. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  160. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  161. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  162. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  163. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  164. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  165. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  166. webscout/Provider/TTI/aiarta/README.md +0 -134
  167. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  168. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  169. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  170. webscout/Provider/TTI/artbit/README.md +0 -100
  171. webscout/Provider/TTI/artbit/__init__.py +0 -22
  172. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  173. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  174. webscout/Provider/TTI/fastflux/README.md +0 -129
  175. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  176. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  177. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  178. webscout/Provider/TTI/huggingface/README.md +0 -114
  179. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  180. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  181. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  182. webscout/Provider/TTI/piclumen/README.md +0 -161
  183. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  184. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  185. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  186. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  187. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  188. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  189. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  190. webscout/Provider/TTI/talkai/README.md +0 -139
  191. webscout/Provider/TTI/talkai/__init__.py +0 -4
  192. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  193. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  194. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  195. webscout-8.2.8.dist-info/RECORD +0 -334
  196. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  197. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
@@ -1,359 +1,333 @@
1
- import json
2
- import random
3
- from uuid import uuid4
4
- from typing import Dict, Optional, Generator, Union, Any
5
- from curl_cffi import requests
6
-
7
- from webscout.AIbase import AISearch
8
- from webscout import exceptions
9
- from webscout.litagent import LitAgent
10
-
11
-
12
- class Response:
13
- """A wrapper class for Perplexity API responses.
14
-
15
- This class automatically converts response objects to their text representation
16
- when printed or converted to string.
17
-
18
- Attributes:
19
- text (str): The text content of the response
20
-
21
- Example:
22
- >>> response = Response("Hello, world!")
23
- >>> print(response)
24
- Hello, world!
25
- >>> str(response)
26
- 'Hello, world!'
27
- """
28
- def __init__(self, text: str):
29
- self.text = text
30
-
31
- def __str__(self):
32
- return self.text
33
-
34
- def __repr__(self):
35
- return self.text
36
-
37
-
38
- class Perplexity(AISearch):
39
- """A class to interact with the Perplexity AI search API.
40
-
41
- Perplexity provides a powerful search interface that returns AI-generated responses
42
- based on web content. It supports both streaming and non-streaming responses,
43
- multiple search modes, and model selection.
44
-
45
- Basic Usage:
46
- >>> from webscout import Perplexity
47
- >>> ai = Perplexity()
48
- >>> # Non-streaming example
49
- >>> response = ai.search("What is Python?")
50
- >>> print(response)
51
- Python is a high-level programming language...
52
-
53
- >>> # Streaming example
54
- >>> for chunk in ai.search("Tell me about AI", stream=True):
55
- ... print(chunk, end="", flush=True)
56
- Artificial Intelligence is...
57
-
58
- >>> # Pro search with specific model (requires authentication via cookies)
59
- >>> cookies = {"perplexity-user": "your_cookie_value"}
60
- >>> ai_pro = Perplexity(cookies=cookies)
61
- >>> response = ai_pro.search("Latest AI research", mode="pro", model="gpt-4o")
62
- >>> print(response)
63
-
64
- >>> # Raw response format
65
- >>> for chunk in ai.search("Hello", stream=True, raw=True):
66
- ... print(chunk)
67
- {'text': 'Hello'}
68
- {'text': ' there!'}
69
-
70
- Args:
71
- cookies (dict, optional): Cookies to use for authentication. Defaults to None.
72
- timeout (int, optional): Request timeout in seconds. Defaults to 60.
73
- proxies (dict, optional): Proxy configuration for requests. Defaults to None.
74
- """
75
-
76
- def __init__(
77
- self,
78
- cookies: Optional[Dict[str, str]] = None,
79
- timeout: int = 60,
80
- proxies: Optional[Dict[str, str]] = None
81
- ):
82
- """
83
- Initialize the Perplexity client.
84
-
85
- Args:
86
- cookies (dict, optional): Cookies to use for authentication. Defaults to None.
87
- timeout (int, optional): Request timeout in seconds. Defaults to 60.
88
- proxies (dict, optional): Proxy configuration for requests. Defaults to None.
89
- """
90
- self.timeout = timeout
91
- self.agent = LitAgent()
92
- self.session = requests.Session(headers={
93
- 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
94
- 'accept-language': 'en-US,en;q=0.9',
95
- 'cache-control': 'max-age=0',
96
- 'dnt': '1',
97
- 'priority': 'u=0, i',
98
- 'sec-ch-ua': '"Not;A=Brand";v="24", "Chromium";v="128"',
99
- 'sec-ch-ua-arch': '"x86"',
100
- 'sec-ch-ua-bitness': '"64"',
101
- 'sec-ch-ua-full-version': '"128.0.6613.120"',
102
- 'sec-ch-ua-full-version-list': '"Not;A=Brand";v="24.0.0.0", "Chromium";v="128.0.6613.120"',
103
- 'sec-ch-ua-mobile': '?0',
104
- 'sec-ch-ua-model': '""',
105
- 'sec-ch-ua-platform': '"Windows"',
106
- 'sec-ch-ua-platform-version': '"19.0.0"',
107
- 'sec-fetch-dest': 'document',
108
- 'sec-fetch-mode': 'navigate',
109
- 'sec-fetch-site': 'same-origin',
110
- 'sec-fetch-user': '?1',
111
- 'upgrade-insecure-requests': '1',
112
- 'user-agent': self.agent.random(),
113
- }, cookies=cookies or {}, impersonate='chrome')
114
-
115
- # Apply proxies if provided
116
- if proxies:
117
- self.session.proxies.update(proxies)
118
-
119
- # Initialize session with socket.io
120
- self.timestamp = format(random.getrandbits(32), '08x')
121
-
122
- # Get socket.io session ID
123
- response = self.session.get(f'https://www.perplexity.ai/socket.io/?EIO=4&transport=polling&t={self.timestamp}')
124
- self.sid = json.loads(response.text[1:])['sid']
125
-
126
- # Initialize socket.io connection
127
- assert (self.session.post(
128
- f'https://www.perplexity.ai/socket.io/?EIO=4&transport=polling&t={self.timestamp}&sid={self.sid}',
129
- data='40{"jwt":"anonymous-ask-user"}'
130
- )).text == 'OK'
131
-
132
- # Get session info
133
- self.session.get('https://www.perplexity.ai/api/auth/session')
134
-
135
- # Set default values
136
- self.copilot = 0 if not cookies else float('inf')
137
- self.file_upload = 0 if not cookies else float('inf')
138
-
139
- def _extract_answer(self, response):
140
- """
141
- Extract the answer from the response.
142
-
143
- Args:
144
- response (dict): The response from Perplexity AI.
145
-
146
- Returns:
147
- str: The extracted answer text.
148
- """
149
- if not response:
150
- return ""
151
-
152
- # Find the FINAL step in the text array
153
- final_step = None
154
- if 'text' in response and isinstance(response['text'], list):
155
- for step in response['text']:
156
- if step.get('step_type') == 'FINAL' and 'content' in step and 'answer' in step['content']:
157
- final_step = step
158
- break
159
-
160
- if not final_step:
161
- return ""
162
-
163
- try:
164
- # Parse the answer JSON string
165
- answer_json = json.loads(final_step['content']['answer'])
166
- return answer_json.get('answer', '')
167
- except (json.JSONDecodeError, KeyError):
168
- return ""
169
-
170
- def search(
171
- self,
172
- prompt: str,
173
- mode: str = 'auto',
174
- model: Optional[str] = None,
175
- sources: Optional[list] = None,
176
- stream: bool = False,
177
- raw: bool = False,
178
- language: str = 'en-US',
179
- follow_up: Optional[Dict[str, Any]] = None,
180
- incognito: bool = False
181
- ) -> Union[Response, Generator[Union[Dict[str, str], Response], None, None]]:
182
- """Search using the Perplexity API and get AI-generated responses.
183
-
184
- This method sends a search query to Perplexity and returns the AI-generated response.
185
- It supports both streaming and non-streaming modes, as well as raw response format.
186
-
187
- Args:
188
- prompt (str): The search query or prompt to send to the API.
189
- mode (str, optional): Search mode. Options: 'auto', 'pro', 'reasoning', 'deep research'.
190
- Defaults to 'auto'.
191
- model (str, optional): Model to use. Available models depend on the mode. Defaults to None.
192
- sources (list, optional): Sources to use. Options: 'web', 'scholar', 'social'.
193
- Defaults to ['web'].
194
- stream (bool, optional): If True, yields response chunks as they arrive.
195
- If False, returns complete response. Defaults to False.
196
- raw (bool, optional): If True, returns raw response dictionaries.
197
- If False, returns Response objects that convert to text automatically.
198
- Defaults to False.
199
- language (str, optional): Language to use. Defaults to 'en-US'.
200
- follow_up (dict, optional): Follow-up information. Defaults to None.
201
- incognito (bool, optional): Whether to use incognito mode. Defaults to False.
202
-
203
- Returns:
204
- If stream=True: Generator yielding response chunks as they arrive
205
- If stream=False: Complete response
206
-
207
- Raises:
208
- ValueError: If invalid mode or model is provided
209
- exceptions.APIConnectionError: If connection to API fails
210
- exceptions.FailedToGenerateResponseError: If response generation fails
211
- """
212
- if sources is None:
213
- sources = ['web']
214
-
215
- # Validate inputs
216
- if mode not in ['auto', 'pro', 'reasoning', 'deep research']:
217
- raise ValueError('Search modes -> ["auto", "pro", "reasoning", "deep research"]')
218
-
219
- if not all([source in ('web', 'scholar', 'social') for source in sources]):
220
- raise ValueError('Sources -> ["web", "scholar", "social"]')
221
-
222
- # Check if model is valid for the selected mode
223
- valid_models = {
224
- 'auto': [None],
225
- 'pro': [None, 'sonar', 'gpt-4.5', 'gpt-4o', 'claude 3.7 sonnet', 'gemini 2.0 flash', 'grok-2'],
226
- 'reasoning': [None, 'r1', 'o3-mini', 'claude 3.7 sonnet'],
227
- 'deep research': [None]
228
- }
229
-
230
- if mode in valid_models and model not in valid_models[mode] and model is not None:
231
- raise ValueError(f'Invalid model for {mode} mode. Valid models: {valid_models[mode]}')
232
-
233
- # Prepare request data
234
- json_data = {
235
- 'query_str': prompt,
236
- 'params': {
237
- 'attachments': follow_up['attachments'] if follow_up else [],
238
- 'frontend_context_uuid': str(uuid4()),
239
- 'frontend_uuid': str(uuid4()),
240
- 'is_incognito': incognito,
241
- 'language': language,
242
- 'last_backend_uuid': follow_up['backend_uuid'] if follow_up else None,
243
- 'mode': 'concise' if mode == 'auto' else 'copilot',
244
- 'model_preference': {
245
- 'auto': {
246
- None: 'turbo'
247
- },
248
- 'pro': {
249
- None: 'pplx_pro',
250
- 'sonar': 'experimental',
251
- 'gpt-4.5': 'gpt45',
252
- 'gpt-4o': 'gpt4o',
253
- 'claude 3.7 sonnet': 'claude2',
254
- 'gemini 2.0 flash': 'gemini2flash',
255
- 'grok-2': 'grok'
256
- },
257
- 'reasoning': {
258
- None: 'pplx_reasoning',
259
- 'r1': 'r1',
260
- 'o3-mini': 'o3mini',
261
- 'claude 3.7 sonnet': 'claude37sonnetthinking'
262
- },
263
- 'deep research': {
264
- None: 'pplx_alpha'
265
- }
266
- }[mode][model],
267
- 'source': 'default',
268
- 'sources': sources,
269
- 'version': '2.18'
270
- }
271
- }
272
-
273
- try:
274
- # Make the request
275
- resp = self.session.post(
276
- 'https://www.perplexity.ai/rest/sse/perplexity_ask',
277
- json=json_data,
278
- stream=True,
279
- timeout=self.timeout
280
- )
281
-
282
- if resp.status_code != 200:
283
- raise exceptions.APIConnectionError(f"API returned status code {resp.status_code}")
284
-
285
- # Define streaming response handler
286
- def stream_response():
287
- for chunk in resp.iter_lines(delimiter=b'\r\n\r\n'):
288
- content = chunk.decode('utf-8')
289
- if content.startswith('event: message\r\n'):
290
- content_json = json.loads(content[len('event: message\r\ndata: '):])
291
- if 'text' in content_json:
292
- try:
293
- # If text is a string, try to parse it as JSON
294
- if isinstance(content_json['text'], str):
295
- content_json['text'] = json.loads(content_json['text'])
296
- except json.JSONDecodeError:
297
- pass
298
-
299
- if raw:
300
- yield content_json
301
- else:
302
- # For non-raw responses, extract text from each chunk
303
- if 'text' in content_json and isinstance(content_json['text'], list):
304
- for step in content_json['text']:
305
- if step.get('type') == 'answer' and 'value' in step:
306
- yield Response(step['value'])
307
- elif step.get('type') == 'thinking' and 'value' in step:
308
- yield Response(step['value'])
309
- elif content.startswith('event: end_of_stream\r\n'):
310
- return
311
-
312
- # Handle streaming or non-streaming response
313
- if stream:
314
- return stream_response()
315
- else:
316
- chunks = []
317
- final_response = None
318
-
319
- for chunk in resp.iter_lines(delimiter=b'\r\n\r\n'):
320
- content = chunk.decode('utf-8')
321
- if content.startswith('event: message\r\n'):
322
- content_json = json.loads(content[len('event: message\r\ndata: '):])
323
- if 'text' in content_json:
324
- try:
325
- # If text is a string, try to parse it as JSON
326
- if isinstance(content_json['text'], str):
327
- content_json['text'] = json.loads(content_json['text'])
328
- except json.JSONDecodeError:
329
- pass
330
- chunks.append(content_json)
331
- final_response = content_json
332
- elif content.startswith('event: end_of_stream\r\n'):
333
- # Process the final response to extract the answer
334
- if final_response:
335
- answer_text = self._extract_answer(final_response)
336
- return Response(answer_text) if not raw else final_response
337
- elif chunks:
338
- answer_text = self._extract_answer(chunks[-1])
339
- return Response(answer_text) if not raw else chunks[-1]
340
- else:
341
- return Response("") if not raw else {}
342
-
343
- # If we get here, something went wrong
344
- raise exceptions.FailedToGenerateResponseError("Failed to get complete response")
345
-
346
- except requests.RequestsError as e:
347
- raise exceptions.APIConnectionError(f"Connection error: {str(e)}")
348
- except json.JSONDecodeError:
349
- raise exceptions.FailedToGenerateResponseError("Failed to parse response JSON")
350
- except Exception as e:
351
- raise exceptions.FailedToGenerateResponseError(f"Error: {str(e)}")
352
-
353
-
354
- if __name__ == "__main__":
355
- # Simple test
356
- ai = Perplexity()
357
- response = ai.search("What is Python?")
358
- print(response)
1
+ import json
2
+ import random
3
+ from uuid import uuid4
4
+ from typing import Dict, Optional, Generator, Union, Any
5
+ from curl_cffi import requests
6
+
7
+ from webscout.AIbase import AISearch, SearchResponse
8
+ from webscout import exceptions
9
+ from webscout.litagent import LitAgent
10
+
11
+
12
+ class Perplexity(AISearch):
13
+ """A class to interact with the Perplexity AI search API.
14
+
15
+ Perplexity provides a powerful search interface that returns AI-generated responses
16
+ based on web content. It supports both streaming and non-streaming responses,
17
+ multiple search modes, and model selection.
18
+
19
+ Basic Usage:
20
+ >>> from webscout import Perplexity
21
+ >>> ai = Perplexity()
22
+ >>> # Non-streaming example
23
+ >>> response = ai.search("What is Python?")
24
+ >>> print(response)
25
+ Python is a high-level programming language...
26
+
27
+ >>> # Streaming example
28
+ >>> for chunk in ai.search("Tell me about AI", stream=True):
29
+ ... print(chunk, end="", flush=True)
30
+ Artificial Intelligence is...
31
+
32
+ >>> # Pro search with specific model (requires authentication via cookies)
33
+ >>> cookies = {"perplexity-user": "your_cookie_value"}
34
+ >>> ai_pro = Perplexity(cookies=cookies)
35
+ >>> response = ai_pro.search("Latest AI research", mode="pro", model="gpt-4o")
36
+ >>> print(response)
37
+
38
+ >>> # Raw response format
39
+ >>> for chunk in ai.search("Hello", stream=True, raw=True):
40
+ ... print(chunk)
41
+ {'text': 'Hello'}
42
+ {'text': ' there!'}
43
+
44
+ Args:
45
+ cookies (dict, optional): Cookies to use for authentication. Defaults to None.
46
+ timeout (int, optional): Request timeout in seconds. Defaults to 60.
47
+ proxies (dict, optional): Proxy configuration for requests. Defaults to None.
48
+ """
49
+
50
+ def __init__(
51
+ self,
52
+ cookies: Optional[Dict[str, str]] = None,
53
+ timeout: int = 60,
54
+ proxies: Optional[Dict[str, str]] = None
55
+ ):
56
+ """
57
+ Initialize the Perplexity client.
58
+
59
+ Args:
60
+ cookies (dict, optional): Cookies to use for authentication. Defaults to None.
61
+ timeout (int, optional): Request timeout in seconds. Defaults to 60.
62
+ proxies (dict, optional): Proxy configuration for requests. Defaults to None.
63
+ """
64
+ self.timeout = timeout
65
+ self.agent = LitAgent()
66
+ self.session = requests.Session(headers={
67
+ 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
68
+ 'accept-language': 'en-US,en;q=0.9',
69
+ 'cache-control': 'max-age=0',
70
+ 'dnt': '1',
71
+ 'priority': 'u=0, i',
72
+ 'sec-ch-ua': '"Not;A=Brand";v="24", "Chromium";v="128"',
73
+ 'sec-ch-ua-arch': '"x86"',
74
+ 'sec-ch-ua-bitness': '"64"',
75
+ 'sec-ch-ua-full-version': '"128.0.6613.120"',
76
+ 'sec-ch-ua-full-version-list': '"Not;A=Brand";v="24.0.0.0", "Chromium";v="128.0.6613.120"',
77
+ 'sec-ch-ua-mobile': '?0',
78
+ 'sec-ch-ua-model': '""',
79
+ 'sec-ch-ua-platform': '"Windows"',
80
+ 'sec-ch-ua-platform-version': '"19.0.0"',
81
+ 'sec-fetch-dest': 'document',
82
+ 'sec-fetch-mode': 'navigate',
83
+ 'sec-fetch-site': 'same-origin',
84
+ 'sec-fetch-user': '?1',
85
+ 'upgrade-insecure-requests': '1',
86
+ 'user-agent': self.agent.random(),
87
+ }, cookies=cookies or {}, impersonate='chrome')
88
+
89
+ # Apply proxies if provided
90
+ if proxies:
91
+ self.session.proxies.update(proxies)
92
+
93
+ # Initialize session with socket.io
94
+ self.timestamp = format(random.getrandbits(32), '08x')
95
+
96
+ # Get socket.io session ID
97
+ response = self.session.get(f'https://www.perplexity.ai/socket.io/?EIO=4&transport=polling&t={self.timestamp}')
98
+ self.sid = json.loads(response.text[1:])['sid']
99
+
100
+ # Initialize socket.io connection
101
+ assert (self.session.post(
102
+ f'https://www.perplexity.ai/socket.io/?EIO=4&transport=polling&t={self.timestamp}&sid={self.sid}',
103
+ data='40{"jwt":"anonymous-ask-user"}'
104
+ )).text == 'OK'
105
+
106
+ # Get session info
107
+ self.session.get('https://www.perplexity.ai/api/auth/session')
108
+
109
+ # Set default values
110
+ self.copilot = 0 if not cookies else float('inf')
111
+ self.file_upload = 0 if not cookies else float('inf')
112
+
113
+ def _extract_answer(self, response):
114
+ """
115
+ Extract the answer from the response.
116
+
117
+ Args:
118
+ response (dict): The response from Perplexity AI.
119
+
120
+ Returns:
121
+ str: The extracted answer text.
122
+ """
123
+ if not response:
124
+ return ""
125
+
126
+ # Find the FINAL step in the text array
127
+ final_step = None
128
+ if 'text' in response and isinstance(response['text'], list):
129
+ for step in response['text']:
130
+ if step.get('step_type') == 'FINAL' and 'content' in step and 'answer' in step['content']:
131
+ final_step = step
132
+ break
133
+
134
+ if not final_step:
135
+ return ""
136
+
137
+ try:
138
+ # Parse the answer JSON string
139
+ answer_json = json.loads(final_step['content']['answer'])
140
+ return answer_json.get('answer', '')
141
+ except (json.JSONDecodeError, KeyError):
142
+ return ""
143
+
144
+ def search(
145
+ self,
146
+ prompt: str,
147
+ mode: str = 'auto',
148
+ model: Optional[str] = None,
149
+ sources: Optional[list] = None,
150
+ stream: bool = False,
151
+ raw: bool = False,
152
+ language: str = 'en-US',
153
+ follow_up: Optional[Dict[str, Any]] = None,
154
+ incognito: bool = False
155
+ ) -> Union[SearchResponse, Generator[Union[Dict[str, str], SearchResponse], None, None]]:
156
+ """Search using the Perplexity API and get AI-generated responses.
157
+
158
+ This method sends a search query to Perplexity and returns the AI-generated response.
159
+ It supports both streaming and non-streaming modes, as well as raw response format.
160
+
161
+ Args:
162
+ prompt (str): The search query or prompt to send to the API.
163
+ mode (str, optional): Search mode. Options: 'auto', 'pro', 'reasoning', 'deep research'.
164
+ Defaults to 'auto'.
165
+ model (str, optional): Model to use. Available models depend on the mode. Defaults to None.
166
+ sources (list, optional): Sources to use. Options: 'web', 'scholar', 'social'.
167
+ Defaults to ['web'].
168
+ stream (bool, optional): If True, yields response chunks as they arrive.
169
+ If False, returns complete response. Defaults to False.
170
+ raw (bool, optional): If True, returns raw response dictionaries.
171
+ If False, returns Response objects that convert to text automatically.
172
+ Defaults to False.
173
+ language (str, optional): Language to use. Defaults to 'en-US'.
174
+ follow_up (dict, optional): Follow-up information. Defaults to None.
175
+ incognito (bool, optional): Whether to use incognito mode. Defaults to False.
176
+
177
+ Returns:
178
+ If stream=True: Generator yielding response chunks as they arrive
179
+ If stream=False: Complete response
180
+
181
+ Raises:
182
+ ValueError: If invalid mode or model is provided
183
+ exceptions.APIConnectionError: If connection to API fails
184
+ exceptions.FailedToGenerateResponseError: If response generation fails
185
+ """
186
+ if sources is None:
187
+ sources = ['web']
188
+
189
+ # Validate inputs
190
+ if mode not in ['auto', 'pro', 'reasoning', 'deep research']:
191
+ raise ValueError('Search modes -> ["auto", "pro", "reasoning", "deep research"]')
192
+
193
+ if not all([source in ('web', 'scholar', 'social') for source in sources]):
194
+ raise ValueError('Sources -> ["web", "scholar", "social"]')
195
+
196
+ # Check if model is valid for the selected mode
197
+ valid_models = {
198
+ 'auto': [None],
199
+ 'pro': [None, 'sonar', 'gpt-4.5', 'gpt-4o', 'claude 3.7 sonnet', 'gemini 2.0 flash', 'grok-2'],
200
+ 'reasoning': [None, 'r1', 'o3-mini', 'claude 3.7 sonnet'],
201
+ 'deep research': [None]
202
+ }
203
+
204
+ if mode in valid_models and model not in valid_models[mode] and model is not None:
205
+ raise ValueError(f'Invalid model for {mode} mode. Valid models: {valid_models[mode]}')
206
+
207
+ # Prepare request data
208
+ json_data = {
209
+ 'query_str': prompt,
210
+ 'params': {
211
+ 'attachments': follow_up['attachments'] if follow_up else [],
212
+ 'frontend_context_uuid': str(uuid4()),
213
+ 'frontend_uuid': str(uuid4()),
214
+ 'is_incognito': incognito,
215
+ 'language': language,
216
+ 'last_backend_uuid': follow_up['backend_uuid'] if follow_up else None,
217
+ 'mode': 'concise' if mode == 'auto' else 'copilot',
218
+ 'model_preference': {
219
+ 'auto': {
220
+ None: 'turbo'
221
+ },
222
+ 'pro': {
223
+ None: 'pplx_pro',
224
+ 'sonar': 'experimental',
225
+ 'gpt-4.5': 'gpt45',
226
+ 'gpt-4o': 'gpt4o',
227
+ 'claude 3.7 sonnet': 'claude2',
228
+ 'gemini 2.0 flash': 'gemini2flash',
229
+ 'grok-2': 'grok'
230
+ },
231
+ 'reasoning': {
232
+ None: 'pplx_reasoning',
233
+ 'r1': 'r1',
234
+ 'o3-mini': 'o3mini',
235
+ 'claude 3.7 sonnet': 'claude37sonnetthinking'
236
+ },
237
+ 'deep research': {
238
+ None: 'pplx_alpha'
239
+ }
240
+ }[mode][model],
241
+ 'source': 'default',
242
+ 'sources': sources,
243
+ 'version': '2.18'
244
+ }
245
+ }
246
+
247
+ try:
248
+ # Make the request
249
+ resp = self.session.post(
250
+ 'https://www.perplexity.ai/rest/sse/perplexity_ask',
251
+ json=json_data,
252
+ stream=True,
253
+ timeout=self.timeout
254
+ )
255
+
256
+ if resp.status_code != 200:
257
+ raise exceptions.APIConnectionError(f"API returned status code {resp.status_code}")
258
+
259
+ # Define streaming response handler
260
+ def stream_response():
261
+ for chunk in resp.iter_lines(delimiter=b'\r\n\r\n'):
262
+ content = chunk.decode('utf-8')
263
+ if content.startswith('event: message\r\n'):
264
+ content_json = json.loads(content[len('event: message\r\ndata: '):])
265
+ if 'text' in content_json:
266
+ try:
267
+ # If text is a string, try to parse it as JSON
268
+ if isinstance(content_json['text'], str):
269
+ content_json['text'] = json.loads(content_json['text'])
270
+ except json.JSONDecodeError:
271
+ pass
272
+
273
+ if raw:
274
+ yield content_json
275
+ else:
276
+ # For non-raw responses, extract text from each chunk
277
+ if 'text' in content_json and isinstance(content_json['text'], list):
278
+ for step in content_json['text']:
279
+ if step.get('type') == 'answer' and 'value' in step:
280
+ yield SearchResponse(step['value'])
281
+ elif step.get('type') == 'thinking' and 'value' in step:
282
+ yield SearchResponse(step['value'])
283
+ elif content.startswith('event: end_of_stream\r\n'):
284
+ return
285
+
286
+ # Handle streaming or non-streaming response
287
+ if stream:
288
+ return stream_response()
289
+ else:
290
+ chunks = []
291
+ final_response = None
292
+
293
+ for chunk in resp.iter_lines(delimiter=b'\r\n\r\n'):
294
+ content = chunk.decode('utf-8')
295
+ if content.startswith('event: message\r\n'):
296
+ content_json = json.loads(content[len('event: message\r\ndata: '):])
297
+ if 'text' in content_json:
298
+ try:
299
+ # If text is a string, try to parse it as JSON
300
+ if isinstance(content_json['text'], str):
301
+ content_json['text'] = json.loads(content_json['text'])
302
+ except json.JSONDecodeError:
303
+ pass
304
+ chunks.append(content_json)
305
+ final_response = content_json
306
+ elif content.startswith('event: end_of_stream\r\n'):
307
+ # Process the final response to extract the answer
308
+ if final_response:
309
+ answer_text = self._extract_answer(final_response)
310
+ return SearchResponse(answer_text) if not raw else final_response
311
+ elif chunks:
312
+ answer_text = self._extract_answer(chunks[-1])
313
+ return SearchResponse(answer_text) if not raw else chunks[-1]
314
+ else:
315
+ return SearchResponse("") if not raw else {}
316
+
317
+ # If we get here, something went wrong
318
+ raise exceptions.FailedToGenerateResponseError("Failed to get complete response")
319
+
320
+ except requests.RequestsError as e:
321
+ raise exceptions.APIConnectionError(f"Connection error: {str(e)}")
322
+ except json.JSONDecodeError:
323
+ raise exceptions.FailedToGenerateResponseError("Failed to parse response JSON")
324
+ except Exception as e:
325
+ raise exceptions.FailedToGenerateResponseError(f"Error: {str(e)}")
326
+
327
+
328
if __name__ == "__main__":
    # Manual smoke test: run a single query and print the resulting answer.
    client = Perplexity()
    answer = client.search("What is Python?")
    print(answer)
359
333