perplexity-webui-scraper 0.3.6__tar.gz → 0.4.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32) hide show
  1. {perplexity_webui_scraper-0.3.6 → perplexity_webui_scraper-0.4.0}/PKG-INFO +125 -106
  2. perplexity_webui_scraper-0.4.0/README.md +287 -0
  3. {perplexity_webui_scraper-0.3.6 → perplexity_webui_scraper-0.4.0}/pyproject.toml +8 -9
  4. {perplexity_webui_scraper-0.3.6 → perplexity_webui_scraper-0.4.0}/src/perplexity_webui_scraper/__init__.py +24 -3
  5. {perplexity_webui_scraper-0.3.6 → perplexity_webui_scraper-0.4.0}/src/perplexity_webui_scraper/cli/get_perplexity_session_token.py +21 -53
  6. perplexity_webui_scraper-0.4.0/src/perplexity_webui_scraper/config.py +48 -0
  7. perplexity_webui_scraper-0.4.0/src/perplexity_webui_scraper/constants.py +46 -0
  8. {perplexity_webui_scraper-0.3.6 → perplexity_webui_scraper-0.4.0}/src/perplexity_webui_scraper/core.py +18 -154
  9. perplexity_webui_scraper-0.4.0/src/perplexity_webui_scraper/enums.py +85 -0
  10. {perplexity_webui_scraper-0.3.6 → perplexity_webui_scraper-0.4.0}/src/perplexity_webui_scraper/exceptions.py +29 -50
  11. perplexity_webui_scraper-0.4.0/src/perplexity_webui_scraper/http.py +240 -0
  12. perplexity_webui_scraper-0.4.0/src/perplexity_webui_scraper/limits.py +15 -0
  13. perplexity_webui_scraper-0.4.0/src/perplexity_webui_scraper/logging.py +121 -0
  14. perplexity_webui_scraper-0.4.0/src/perplexity_webui_scraper/mcp/__init__.py +14 -0
  15. {perplexity_webui_scraper-0.3.6 → perplexity_webui_scraper-0.4.0}/src/perplexity_webui_scraper/mcp/__main__.py +1 -3
  16. perplexity_webui_scraper-0.4.0/src/perplexity_webui_scraper/mcp/server.py +189 -0
  17. perplexity_webui_scraper-0.4.0/src/perplexity_webui_scraper/models.py +65 -0
  18. perplexity_webui_scraper-0.4.0/src/perplexity_webui_scraper/resilience.py +98 -0
  19. perplexity_webui_scraper-0.4.0/src/perplexity_webui_scraper/types.py +49 -0
  20. perplexity_webui_scraper-0.3.6/README.md +0 -268
  21. perplexity_webui_scraper-0.3.6/src/perplexity_webui_scraper/config.py +0 -65
  22. perplexity_webui_scraper-0.3.6/src/perplexity_webui_scraper/constants.py +0 -84
  23. perplexity_webui_scraper-0.3.6/src/perplexity_webui_scraper/enums.py +0 -147
  24. perplexity_webui_scraper-0.3.6/src/perplexity_webui_scraper/http.py +0 -533
  25. perplexity_webui_scraper-0.3.6/src/perplexity_webui_scraper/limits.py +0 -25
  26. perplexity_webui_scraper-0.3.6/src/perplexity_webui_scraper/logging.py +0 -278
  27. perplexity_webui_scraper-0.3.6/src/perplexity_webui_scraper/mcp/__init__.py +0 -20
  28. perplexity_webui_scraper-0.3.6/src/perplexity_webui_scraper/mcp/server.py +0 -166
  29. perplexity_webui_scraper-0.3.6/src/perplexity_webui_scraper/models.py +0 -109
  30. perplexity_webui_scraper-0.3.6/src/perplexity_webui_scraper/resilience.py +0 -181
  31. perplexity_webui_scraper-0.3.6/src/perplexity_webui_scraper/types.py +0 -56
  32. {perplexity_webui_scraper-0.3.6 → perplexity_webui_scraper-0.4.0}/src/perplexity_webui_scraper/py.typed +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: perplexity-webui-scraper
3
- Version: 0.3.6
3
+ Version: 0.4.0
4
4
  Summary: Python scraper to extract AI responses from Perplexity's web interface.
5
5
  Keywords: perplexity,ai,scraper,webui,api,client
6
6
  Author: henrique-coder
@@ -21,16 +21,16 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
21
21
  Classifier: Typing :: Typed
22
22
  Requires-Dist: curl-cffi>=0.14.0
23
23
  Requires-Dist: loguru>=0.7.3
24
- Requires-Dist: orjson>=3.11.5
24
+ Requires-Dist: orjson>=3.11.6
25
25
  Requires-Dist: pydantic>=2.12.5
26
26
  Requires-Dist: tenacity>=9.1.2
27
- Requires-Dist: fastmcp>=2.14.2 ; extra == 'mcp'
28
- Requires-Python: >=3.10
29
- Project-URL: Changelog, https://github.com/henrique-coder/perplexity-webui-scraper/releases
30
- Project-URL: Documentation, https://github.com/henrique-coder/perplexity-webui-scraper#readme
27
+ Requires-Dist: fastmcp>=2.14.4 ; extra == 'mcp'
28
+ Requires-Python: >=3.10, <3.15
31
29
  Project-URL: Homepage, https://github.com/henrique-coder/perplexity-webui-scraper
32
- Project-URL: Issues, https://github.com/henrique-coder/perplexity-webui-scraper/issues
30
+ Project-URL: Documentation, https://github.com/henrique-coder/perplexity-webui-scraper#readme
33
31
  Project-URL: Repository, https://github.com/henrique-coder/perplexity-webui-scraper.git
32
+ Project-URL: Issues, https://github.com/henrique-coder/perplexity-webui-scraper/issues
33
+ Project-URL: Changelog, https://github.com/henrique-coder/perplexity-webui-scraper/releases
34
34
  Provides-Extra: mcp
35
35
  Description-Content-Type: text/markdown
36
36
 
@@ -50,26 +50,42 @@ Python scraper to extract AI responses from [Perplexity's](https://www.perplexit
50
50
 
51
51
  ## Installation
52
52
 
53
+ ### As a Library
54
+
53
55
  ```bash
54
- uv pip install perplexity-webui-scraper # from PyPI (stable)
55
- uv pip install git+https://github.com/henrique-coder/perplexity-webui-scraper.git@dev # from GitHub (development)
56
+ # From PyPI (stable)
57
+ uv add perplexity-webui-scraper
58
+
59
+ # From GitHub dev branch (latest features)
60
+ uv add git+https://github.com/henrique-coder/perplexity-webui-scraper.git@dev
61
+ ```
62
+
63
+ ### As MCP Server
64
+
65
+ No installation required - `uvx` handles everything automatically:
66
+
67
+ ```bash
68
+ # From PyPI (stable)
69
+ uvx --from "perplexity-webui-scraper[mcp]" perplexity-webui-scraper-mcp
70
+
71
+ # From GitHub dev branch (latest features)
72
+ uvx --from "perplexity-webui-scraper[mcp] @ git+https://github.com/henrique-coder/perplexity-webui-scraper.git@dev" perplexity-webui-scraper-mcp
73
+
74
+ # From local directory (for development)
75
+ uv --directory /path/to/perplexity-webui-scraper run perplexity-webui-scraper-mcp
56
76
  ```
57
77
 
58
78
  ## Requirements
59
79
 
60
80
  - **Perplexity Pro/Max account**
61
- - **Session token** (`__Secure-next-auth.session-token` cookie from your browser)
81
+ - **Session token** (`__Secure-next-auth.session-token` cookie)
62
82
 
63
83
  ### Getting Your Session Token
64
84
 
65
- You can obtain your session token in two ways:
66
-
67
85
  #### Option 1: Automatic (CLI Tool)
68
86
 
69
- The package includes a CLI tool to automatically generate and save your session token:
70
-
71
87
  ```bash
72
- get-perplexity-session-token
88
+ uv run get-perplexity-session-token
73
89
  ```
74
90
 
75
91
  This interactive tool will:
@@ -80,17 +96,8 @@ This interactive tool will:
80
96
  4. Extract and display your session token
81
97
  5. Optionally save it to your `.env` file
82
98
 
83
- **Features:**
84
-
85
- - Secure ephemeral session (cleared on exit)
86
- - Automatic `.env` file management
87
- - Support for both OTP codes and magic links
88
- - Clean terminal interface with status updates
89
-
90
99
  #### Option 2: Manual (Browser)
91
100
 
92
- If you prefer to extract the token manually:
93
-
94
101
  1. Log in at [perplexity.ai](https://www.perplexity.ai)
95
102
  2. Open DevTools (`F12`) → Application/Storage → Cookies
96
103
  3. Copy the value of `__Secure-next-auth.session-token`
@@ -107,7 +114,7 @@ conversation = client.create_conversation()
107
114
  conversation.ask("What is quantum computing?")
108
115
  print(conversation.answer)
109
116
 
110
- # Follow-up
117
+ # Follow-up (context is preserved)
111
118
  conversation.ask("Explain it simpler")
112
119
  print(conversation.answer)
113
120
  ```
@@ -130,17 +137,17 @@ from perplexity_webui_scraper import (
130
137
  )
131
138
 
132
139
  config = ConversationConfig(
133
- model=Models.RESEARCH,
140
+ model=Models.DEEP_RESEARCH,
134
141
  source_focus=[SourceFocus.WEB, SourceFocus.ACADEMIC],
135
142
  language="en-US",
136
- coordinates=Coordinates(latitude=40.7128, longitude=-74.0060),
143
+ coordinates=Coordinates(latitude=12.3456, longitude=-98.7654),
137
144
  )
138
145
 
139
146
  conversation = client.create_conversation(config)
140
147
  conversation.ask("Latest AI research", files=["paper.pdf"])
141
148
  ```
142
149
 
143
- ## API
150
+ ## API Reference
144
151
 
145
152
  ### `Perplexity(session_token, config?)`
146
153
 
@@ -163,22 +170,22 @@ conversation.ask("Latest AI research", files=["paper.pdf"])
163
170
 
164
171
  | Model | Description |
165
172
  | ---------------------------------- | ------------------------------------------------------------------------- |
166
- | `Models.RESEARCH` | Research - Fast and thorough for routine research |
167
- | `Models.LABS` | Labs - Multi-step tasks with advanced troubleshooting |
168
- | `Models.BEST` | Best - Automatically selects the most responsive model based on the query |
169
- | `Models.SONAR` | Sonar - Perplexity's fast model |
173
+ | `Models.BEST` | Automatically selects the best model based on the query |
174
+ | `Models.DEEP_RESEARCH` | Create in-depth reports with more sources, charts, and advanced reasoning |
175
+ | `Models.CREATE_FILES_AND_APPS` | Turn your ideas into docs, slides, dashboards, and more |
176
+ | `Models.SONAR` | Perplexity's latest model |
170
177
  | `Models.GPT_52` | GPT-5.2 - OpenAI's latest model |
171
- | `Models.GPT_52_THINKING` | GPT-5.2 Thinking - OpenAI's latest model with thinking |
178
+ | `Models.GPT_52_THINKING` | GPT-5.2 - OpenAI's latest model (thinking) |
179
+ | `Models.CLAUDE_45_SONNET` | Claude Sonnet 4.5 - Anthropic's fast model |
180
+ | `Models.CLAUDE_45_SONNET_THINKING` | Claude Sonnet 4.5 - Anthropic's fast model (thinking) |
172
181
  | `Models.CLAUDE_45_OPUS` | Claude Opus 4.5 - Anthropic's Opus reasoning model |
173
- | `Models.CLAUDE_45_OPUS_THINKING` | Claude Opus 4.5 Thinking - Anthropic's Opus reasoning model with thinking |
174
- | `Models.GEMINI_3_PRO` | Gemini 3 Pro - Google's newest reasoning model |
175
- | `Models.GEMINI_3_FLASH` | Gemini 3 Flash - Google's fast reasoning model |
176
- | `Models.GEMINI_3_FLASH_THINKING` | Gemini 3 Flash Thinking - Google's fast reasoning model with thinking |
177
- | `Models.GROK_41` | Grok 4.1 - xAI's latest advanced model |
178
- | `Models.GROK_41_THINKING` | Grok 4.1 Thinking - xAI's latest reasoning model |
179
- | `Models.KIMI_K2_THINKING` | Kimi K2 Thinking - Moonshot AI's latest reasoning model |
180
- | `Models.CLAUDE_45_SONNET` | Claude Sonnet 4.5 - Anthropic's newest advanced model |
181
- | `Models.CLAUDE_45_SONNET_THINKING` | Claude Sonnet 4.5 Thinking - Anthropic's newest reasoning model |
182
+ | `Models.CLAUDE_45_OPUS_THINKING` | Claude Opus 4.5 - Anthropic's Opus reasoning model (thinking) |
183
+ | `Models.GEMINI_3_FLASH` | Gemini 3 Flash - Google's fast model |
184
+ | `Models.GEMINI_3_FLASH_THINKING` | Gemini 3 Flash - Google's fast model (thinking) |
185
+ | `Models.GEMINI_3_PRO_THINKING` | Gemini 3 Pro - Google's most advanced model (thinking) |
186
+ | `Models.GROK_41` | Grok 4.1 - xAI's latest model |
187
+ | `Models.GROK_41_THINKING` | Grok 4.1 - xAI's latest model (thinking) |
188
+ | `Models.KIMI_K25_THINKING` | Kimi K2.5 - Moonshot AI's latest model |
182
189
 
183
190
  ### CitationMode
184
191
 
@@ -204,76 +211,80 @@ conversation.ask("Latest AI research", files=["paper.pdf"])
204
211
 
205
212
  ## Exceptions
206
213
 
207
- The library provides specific exception types for better error handling:
214
+ | Exception | Description |
215
+ | ---------------------------------- | -------------------------------------------------- |
216
+ | `PerplexityError` | Base exception for all library errors |
217
+ | `HTTPError` | HTTP error with status code and response body |
218
+ | `AuthenticationError` | Session token is invalid or expired (HTTP 401/403) |
219
+ | `RateLimitError` | Rate limit exceeded (HTTP 429) |
220
+ | `FileUploadError` | File upload failed |
221
+ | `FileValidationError` | File validation failed (size, type, etc.) |
222
+ | `ResearchClarifyingQuestionsError` | Research mode asking clarifying questions |
223
+ | `ResponseParsingError` | API response could not be parsed |
224
+ | `StreamingError` | Error during streaming response |
208
225
 
209
- | Exception | Description |
210
- | ---------------------------------- | ------------------------------------------------------------ |
211
- | `PerplexityError` | Base exception for all library errors |
212
- | `AuthenticationError` | Session token is invalid or expired (HTTP 403) |
213
- | `RateLimitError` | Rate limit exceeded (HTTP 429) |
214
- | `FileUploadError` | File upload failed |
215
- | `FileValidationError` | File validation failed (size, type, etc.) |
216
- | `ResearchClarifyingQuestionsError` | Research mode is asking clarifying questions (not supported) |
217
- | `ResponseParsingError` | API response could not be parsed |
218
- | `StreamingError` | Error during streaming response |
226
+ ## MCP Server (Model Context Protocol)
219
227
 
220
- ### Handling Research Mode Clarifying Questions
228
+ The library includes an MCP server for AI assistants like Claude Desktop and Antigravity.
221
229
 
222
- When using Research mode (`Models.RESEARCH`), the API may ask clarifying questions before providing an answer. Since programmatic interaction is not supported, the library raises a `ResearchClarifyingQuestionsError` with the questions:
230
+ Each AI model is exposed as a separate tool - enable only the ones you need to reduce agent context size.
223
231
 
224
- ```python
225
- from perplexity_webui_scraper import (
226
- Perplexity,
227
- ResearchClarifyingQuestionsError,
228
- )
229
-
230
- try:
231
- conversation.ask("Research this topic", model=Models.RESEARCH)
232
- except ResearchClarifyingQuestionsError as error:
233
- print("The AI needs clarification:")
234
- for question in error.questions:
235
- print(f" - {question}")
236
- # Consider rephrasing your query to be more specific
237
- ```
232
+ ### Configuration
238
233
 
239
- ## MCP Server (Model Context Protocol)
234
+ Add to your MCP config file (no installation required):
240
235
 
241
- The library includes an MCP server that allows AI assistants (like Claude) to search using Perplexity AI directly.
236
+ **Claude Desktop** (`~/.config/claude/claude_desktop_config.json`):
242
237
 
243
- ### Installation
244
-
245
- ```bash
246
- uv pip install perplexity-webui-scraper[mcp]
238
+ ```json
239
+ {
240
+ "mcpServers": {
241
+ "perplexity-webui-scraper": {
242
+ "command": "uvx",
243
+ "args": [
244
+ "--from",
245
+ "perplexity-webui-scraper[mcp]",
246
+ "perplexity-webui-scraper-mcp"
247
+ ],
248
+ "env": {
249
+ "PERPLEXITY_SESSION_TOKEN": "your_token_here"
250
+ }
251
+ }
252
+ }
253
+ }
247
254
  ```
248
255
 
249
- ### Running the Server
250
-
251
- ```bash
252
- # Set your session token
253
- export PERPLEXITY_SESSION_TOKEN="your_token_here" # For Linux/Mac
254
- set PERPLEXITY_SESSION_TOKEN="your_token_here" # For Windows
256
+ **From GitHub dev branch:**
255
257
 
256
- # Run with FastMCP
257
- uv run fastmcp run src/perplexity_webui_scraper/mcp/server.py
258
-
259
- # Or test with the dev inspector
260
- uv run fastmcp dev src/perplexity_webui_scraper/mcp/server.py
258
+ ```json
259
+ {
260
+ "mcpServers": {
261
+ "perplexity-webui-scraper": {
262
+ "command": "uvx",
263
+ "args": [
264
+ "--from",
265
+ "perplexity-webui-scraper[mcp] @ git+https://github.com/henrique-coder/perplexity-webui-scraper.git@dev",
266
+ "perplexity-webui-scraper-mcp"
267
+ ],
268
+ "env": {
269
+ "PERPLEXITY_SESSION_TOKEN": "your_token_here"
270
+ }
271
+ }
272
+ }
273
+ }
261
274
  ```
262
275
 
263
- ### Claude Desktop Configuration
264
-
265
- Add to `~/.config/claude/claude_desktop_config.json`:
276
+ **From local directory (for development):**
266
277
 
267
278
  ```json
268
279
  {
269
280
  "mcpServers": {
270
- "perplexity": {
281
+ "perplexity-webui-scraper": {
271
282
  "command": "uv",
272
283
  "args": [
284
+ "--directory",
285
+ "/absolute/path/to/perplexity-webui-scraper",
273
286
  "run",
274
- "fastmcp",
275
- "run",
276
- "path/to/perplexity_webui_scraper/mcp/server.py"
287
+ "perplexity-webui-scraper-mcp"
277
288
  ],
278
289
  "env": {
279
290
  "PERPLEXITY_SESSION_TOKEN": "your_token_here"
@@ -283,19 +294,27 @@ Add to `~/.config/claude/claude_desktop_config.json`:
283
294
  }
284
295
  ```
285
296
 
286
- ### Available Tool
287
-
288
- | Tool | Description |
289
- | ---------------- | --------------------------------------------------------------------------- |
290
- | `perplexity_ask` | Ask questions and get AI-generated answers with real-time data from the web |
291
-
292
- **Parameters:**
293
-
294
- | Parameter | Type | Default | Description |
295
- | -------------- | ----- | -------- | ------------------------------------------------------------- |
296
- | `query` | `str` | - | Question to ask (required) |
297
- | `model` | `str` | `"best"` | AI model (`best`, `research`, `gpt52`, `claude_sonnet`, etc.) |
298
- | `source_focus` | `str` | `"web"` | Source type (`web`, `academic`, `social`, `finance`, `all`) |
297
+ ### Available Tools
298
+
299
+ Each tool uses a specific AI model. Enable only the ones you need:
300
+
301
+ | Tool | Model | Description |
302
+ | ----------------------------------- | ---------------------- | --------------------------------------------- |
303
+ | `perplexity_ask` | Best | Auto-selects best model based on query |
304
+ | `perplexity_deep_research` | Deep Research | In-depth reports with more sources and charts |
305
+ | `perplexity_sonar` | Sonar | Perplexity's latest model |
306
+ | `perplexity_gpt52` | GPT-5.2 | OpenAI's latest model |
307
+ | `perplexity_gpt52_thinking` | GPT-5.2 Thinking | OpenAI's latest model (thinking) |
308
+ | `perplexity_claude_sonnet` | Claude Sonnet 4.5 | Anthropic's fast model |
309
+ | `perplexity_claude_sonnet_thinking` | Claude Sonnet 4.5 Thinking | Anthropic's fast model (thinking) |
310
+ | `perplexity_gemini_flash` | Gemini 3 Flash | Google's fast model |
311
+ | `perplexity_gemini_flash_thinking` | Gemini 3 Flash Thinking | Google's fast model (thinking) |
312
+ | `perplexity_gemini_pro_thinking` | Gemini 3 Pro | Google's most advanced model (thinking) |
313
+ | `perplexity_grok` | Grok 4.1 | xAI's latest model |
314
+ | `perplexity_grok_thinking` | Grok 4.1 Thinking | xAI's latest model (thinking) |
315
+ | `perplexity_kimi_thinking` | Kimi K2.5 | Moonshot AI's latest model |
316
+
317
+ **All tools support `source_focus`:** `web`, `academic`, `social`, `finance`, `all`
299
318
 
300
319
  ## Disclaimer
301
320
 
@@ -0,0 +1,287 @@
1
+ <div align="center">
2
+
3
+ # Perplexity WebUI Scraper
4
+
5
+ Python scraper to extract AI responses from [Perplexity's](https://www.perplexity.ai) web interface.
6
+
7
+ [![PyPI](https://img.shields.io/pypi/v/perplexity-webui-scraper?color=blue)](https://pypi.org/project/perplexity-webui-scraper)
8
+ [![Python](https://img.shields.io/pypi/pyversions/perplexity-webui-scraper)](https://pypi.org/project/perplexity-webui-scraper)
9
+ [![License](https://img.shields.io/github/license/henrique-coder/perplexity-webui-scraper?color=green)](./LICENSE)
10
+
11
+ </div>
12
+
13
+ ---
14
+
15
+ ## Installation
16
+
17
+ ### As a Library
18
+
19
+ ```bash
20
+ # From PyPI (stable)
21
+ uv add perplexity-webui-scraper
22
+
23
+ # From GitHub dev branch (latest features)
24
+ uv add git+https://github.com/henrique-coder/perplexity-webui-scraper.git@dev
25
+ ```
26
+
27
+ ### As MCP Server
28
+
29
+ No installation required - `uvx` handles everything automatically:
30
+
31
+ ```bash
32
+ # From PyPI (stable)
33
+ uvx --from "perplexity-webui-scraper[mcp]" perplexity-webui-scraper-mcp
34
+
35
+ # From GitHub dev branch (latest features)
36
+ uvx --from "perplexity-webui-scraper[mcp] @ git+https://github.com/henrique-coder/perplexity-webui-scraper.git@dev" perplexity-webui-scraper-mcp
37
+
38
+ # From local directory (for development)
39
+ uv --directory /path/to/perplexity-webui-scraper run perplexity-webui-scraper-mcp
40
+ ```
41
+
42
+ ## Requirements
43
+
44
+ - **Perplexity Pro/Max account**
45
+ - **Session token** (`__Secure-next-auth.session-token` cookie)
46
+
47
+ ### Getting Your Session Token
48
+
49
+ #### Option 1: Automatic (CLI Tool)
50
+
51
+ ```bash
52
+ uv run get-perplexity-session-token
53
+ ```
54
+
55
+ This interactive tool will:
56
+
57
+ 1. Ask for your Perplexity email
58
+ 2. Send a verification code to your email
59
+ 3. Accept either a 6-digit code or magic link
60
+ 4. Extract and display your session token
61
+ 5. Optionally save it to your `.env` file
62
+
63
+ #### Option 2: Manual (Browser)
64
+
65
+ 1. Log in at [perplexity.ai](https://www.perplexity.ai)
66
+ 2. Open DevTools (`F12`) → Application/Storage → Cookies
67
+ 3. Copy the value of `__Secure-next-auth.session-token`
68
+ 4. Store in `.env`: `PERPLEXITY_SESSION_TOKEN="your_token"`
69
+
70
+ ## Quick Start
71
+
72
+ ```python
73
+ from perplexity_webui_scraper import Perplexity
74
+
75
+ client = Perplexity(session_token="YOUR_TOKEN")
76
+ conversation = client.create_conversation()
77
+
78
+ conversation.ask("What is quantum computing?")
79
+ print(conversation.answer)
80
+
81
+ # Follow-up (context is preserved)
82
+ conversation.ask("Explain it simpler")
83
+ print(conversation.answer)
84
+ ```
85
+
86
+ ### Streaming
87
+
88
+ ```python
89
+ for chunk in conversation.ask("Explain AI", stream=True):
90
+ print(chunk.answer)
91
+ ```
92
+
93
+ ### With Options
94
+
95
+ ```python
96
+ from perplexity_webui_scraper import (
97
+ ConversationConfig,
98
+ Coordinates,
99
+ Models,
100
+ SourceFocus,
101
+ )
102
+
103
+ config = ConversationConfig(
104
+ model=Models.DEEP_RESEARCH,
105
+ source_focus=[SourceFocus.WEB, SourceFocus.ACADEMIC],
106
+ language="en-US",
107
+ coordinates=Coordinates(latitude=12.3456, longitude=-98.7654),
108
+ )
109
+
110
+ conversation = client.create_conversation(config)
111
+ conversation.ask("Latest AI research", files=["paper.pdf"])
112
+ ```
113
+
114
+ ## API Reference
115
+
116
+ ### `Perplexity(session_token, config?)`
117
+
118
+ | Parameter | Type | Description |
119
+ | --------------- | -------------- | ------------------ |
120
+ | `session_token` | `str` | Browser cookie |
121
+ | `config` | `ClientConfig` | Timeout, TLS, etc. |
122
+
123
+ ### `Conversation.ask(query, model?, files?, citation_mode?, stream?)`
124
+
125
+ | Parameter | Type | Default | Description |
126
+ | --------------- | ----------------------- | ------------- | ------------------- |
127
+ | `query` | `str` | - | Question (required) |
128
+ | `model` | `Model` | `Models.BEST` | AI model |
129
+ | `files` | `list[str \| PathLike]` | `None` | File paths |
130
+ | `citation_mode` | `CitationMode` | `CLEAN` | Citation format |
131
+ | `stream` | `bool` | `False` | Enable streaming |
132
+
133
+ ### Models
134
+
135
+ | Model | Description |
136
+ | ---------------------------------- | ------------------------------------------------------------------------- |
137
+ | `Models.BEST` | Automatically selects the best model based on the query |
138
+ | `Models.DEEP_RESEARCH` | Create in-depth reports with more sources, charts, and advanced reasoning |
139
+ | `Models.CREATE_FILES_AND_APPS` | Turn your ideas into docs, slides, dashboards, and more |
140
+ | `Models.SONAR` | Perplexity's latest model |
141
+ | `Models.GPT_52` | GPT-5.2 - OpenAI's latest model |
142
+ | `Models.GPT_52_THINKING` | GPT-5.2 - OpenAI's latest model (thinking) |
143
+ | `Models.CLAUDE_45_SONNET` | Claude Sonnet 4.5 - Anthropic's fast model |
144
+ | `Models.CLAUDE_45_SONNET_THINKING` | Claude Sonnet 4.5 - Anthropic's fast model (thinking) |
145
+ | `Models.CLAUDE_45_OPUS` | Claude Opus 4.5 - Anthropic's Opus reasoning model |
146
+ | `Models.CLAUDE_45_OPUS_THINKING` | Claude Opus 4.5 - Anthropic's Opus reasoning model (thinking) |
147
+ | `Models.GEMINI_3_FLASH` | Gemini 3 Flash - Google's fast model |
148
+ | `Models.GEMINI_3_FLASH_THINKING` | Gemini 3 Flash - Google's fast model (thinking) |
149
+ | `Models.GEMINI_3_PRO_THINKING` | Gemini 3 Pro - Google's most advanced model (thinking) |
150
+ | `Models.GROK_41` | Grok 4.1 - xAI's latest model |
151
+ | `Models.GROK_41_THINKING` | Grok 4.1 - xAI's latest model (thinking) |
152
+ | `Models.KIMI_K25_THINKING` | Kimi K2.5 - Moonshot AI's latest model |
153
+
154
+ ### CitationMode
155
+
156
+ | Mode | Output |
157
+ | ---------- | --------------------- |
158
+ | `DEFAULT` | `text[1]` |
159
+ | `MARKDOWN` | `text[1](url)` |
160
+ | `CLEAN` | `text` (no citations) |
161
+
162
+ ### ConversationConfig
163
+
164
+ | Parameter | Default | Description |
165
+ | ----------------- | ------------- | ------------------ |
166
+ | `model` | `Models.BEST` | Default model |
167
+ | `citation_mode` | `CLEAN` | Citation format |
168
+ | `save_to_library` | `False` | Save to library |
169
+ | `search_focus` | `WEB` | Search type |
170
+ | `source_focus` | `WEB` | Source types |
171
+ | `time_range` | `ALL` | Time filter |
172
+ | `language` | `"en-US"` | Response language |
173
+ | `timezone` | `None` | Timezone |
174
+ | `coordinates` | `None` | Location (lat/lng) |
175
+
176
+ ## Exceptions
177
+
178
+ | Exception | Description |
179
+ | ---------------------------------- | -------------------------------------------------- |
180
+ | `PerplexityError` | Base exception for all library errors |
181
+ | `HTTPError` | HTTP error with status code and response body |
182
+ | `AuthenticationError` | Session token is invalid or expired (HTTP 401/403) |
183
+ | `RateLimitError` | Rate limit exceeded (HTTP 429) |
184
+ | `FileUploadError` | File upload failed |
185
+ | `FileValidationError` | File validation failed (size, type, etc.) |
186
+ | `ResearchClarifyingQuestionsError` | Research mode asking clarifying questions |
187
+ | `ResponseParsingError` | API response could not be parsed |
188
+ | `StreamingError` | Error during streaming response |
189
+
190
+ ## MCP Server (Model Context Protocol)
191
+
192
+ The library includes an MCP server for AI assistants like Claude Desktop and Antigravity.
193
+
194
+ Each AI model is exposed as a separate tool - enable only the ones you need to reduce agent context size.
195
+
196
+ ### Configuration
197
+
198
+ Add to your MCP config file (no installation required):
199
+
200
+ **Claude Desktop** (`~/.config/claude/claude_desktop_config.json`):
201
+
202
+ ```json
203
+ {
204
+ "mcpServers": {
205
+ "perplexity-webui-scraper": {
206
+ "command": "uvx",
207
+ "args": [
208
+ "--from",
209
+ "perplexity-webui-scraper[mcp]",
210
+ "perplexity-webui-scraper-mcp"
211
+ ],
212
+ "env": {
213
+ "PERPLEXITY_SESSION_TOKEN": "your_token_here"
214
+ }
215
+ }
216
+ }
217
+ }
218
+ ```
219
+
220
+ **From GitHub dev branch:**
221
+
222
+ ```json
223
+ {
224
+ "mcpServers": {
225
+ "perplexity-webui-scraper": {
226
+ "command": "uvx",
227
+ "args": [
228
+ "--from",
229
+ "perplexity-webui-scraper[mcp] @ git+https://github.com/henrique-coder/perplexity-webui-scraper.git@dev",
230
+ "perplexity-webui-scraper-mcp"
231
+ ],
232
+ "env": {
233
+ "PERPLEXITY_SESSION_TOKEN": "your_token_here"
234
+ }
235
+ }
236
+ }
237
+ }
238
+ ```
239
+
240
+ **From local directory (for development):**
241
+
242
+ ```json
243
+ {
244
+ "mcpServers": {
245
+ "perplexity-webui-scraper": {
246
+ "command": "uv",
247
+ "args": [
248
+ "--directory",
249
+ "/absolute/path/to/perplexity-webui-scraper",
250
+ "run",
251
+ "perplexity-webui-scraper-mcp"
252
+ ],
253
+ "env": {
254
+ "PERPLEXITY_SESSION_TOKEN": "your_token_here"
255
+ }
256
+ }
257
+ }
258
+ }
259
+ ```
260
+
261
+ ### Available Tools
262
+
263
+ Each tool uses a specific AI model. Enable only the ones you need:
264
+
265
+ | Tool | Model | Description |
266
+ | ----------------------------------- | ---------------------- | --------------------------------------------- |
267
+ | `perplexity_ask` | Best | Auto-selects best model based on query |
268
+ | `perplexity_deep_research` | Deep Research | In-depth reports with more sources and charts |
269
+ | `perplexity_sonar` | Sonar | Perplexity's latest model |
270
+ | `perplexity_gpt52` | GPT-5.2 | OpenAI's latest model |
271
+ | `perplexity_gpt52_thinking` | GPT-5.2 Thinking | OpenAI's latest model (thinking) |
272
+ | `perplexity_claude_sonnet` | Claude Sonnet 4.5 | Anthropic's fast model |
273
+ | `perplexity_claude_sonnet_thinking` | Claude Sonnet 4.5 Thinking | Anthropic's fast model (thinking) |
274
+ | `perplexity_gemini_flash` | Gemini 3 Flash | Google's fast model |
275
+ | `perplexity_gemini_flash_thinking` | Gemini 3 Flash Thinking | Google's fast model (thinking) |
276
+ | `perplexity_gemini_pro_thinking` | Gemini 3 Pro | Google's most advanced model (thinking) |
277
+ | `perplexity_grok` | Grok 4.1 | xAI's latest model |
278
+ | `perplexity_grok_thinking` | Grok 4.1 Thinking | xAI's latest model (thinking) |
279
+ | `perplexity_kimi_thinking` | Kimi K2.5 | Moonshot AI's latest model |
280
+
281
+ **All tools support `source_focus`:** `web`, `academic`, `social`, `finance`, `all`
282
+
283
+ ## Disclaimer
284
+
285
+ This is an **unofficial** library. It uses internal APIs that may change without notice. Use at your own risk.
286
+
287
+ By using this library, you agree to Perplexity AI's Terms of Service.