webscout 8.2.7__py3-none-any.whl → 8.2.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- webscout/AIauto.py +33 -15
- webscout/AIbase.py +96 -37
- webscout/AIutel.py +703 -250
- webscout/Bard.py +441 -323
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/__init__.py +10 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
- webscout/Extra/GitToolkit/gitapi/user.py +96 -0
- webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/YTdownloader.py +957 -0
- webscout/Extra/YTToolkit/__init__.py +3 -0
- webscout/Extra/YTToolkit/transcriber.py +476 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
- webscout/Extra/YTToolkit/ytapi/https.py +88 -0
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
- webscout/Extra/YTToolkit/ytapi/query.py +40 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
- webscout/Extra/YTToolkit/ytapi/video.py +232 -0
- webscout/Extra/__init__.py +7 -0
- webscout/Extra/autocoder/__init__.py +9 -0
- webscout/Extra/autocoder/autocoder.py +1105 -0
- webscout/Extra/autocoder/autocoder_utiles.py +332 -0
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/gguf.py +684 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/tempmail/__init__.py +28 -0
- webscout/Extra/tempmail/async_utils.py +141 -0
- webscout/Extra/tempmail/base.py +161 -0
- webscout/Extra/tempmail/cli.py +187 -0
- webscout/Extra/tempmail/emailnator.py +84 -0
- webscout/Extra/tempmail/mail_tm.py +361 -0
- webscout/Extra/tempmail/temp_mail_io.py +292 -0
- webscout/Extra/weather.md +281 -0
- webscout/Extra/weather.py +194 -0
- webscout/Extra/weather_ascii.py +76 -0
- webscout/Litlogger/README.md +10 -0
- webscout/Litlogger/__init__.py +15 -0
- webscout/Litlogger/formats.py +4 -0
- webscout/Litlogger/handlers.py +103 -0
- webscout/Litlogger/levels.py +13 -0
- webscout/Litlogger/logger.py +92 -0
- webscout/Provider/AI21.py +177 -0
- webscout/Provider/AISEARCH/DeepFind.py +254 -0
- webscout/Provider/AISEARCH/Perplexity.py +333 -0
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +9 -0
- webscout/Provider/AISEARCH/felo_search.py +202 -0
- webscout/Provider/AISEARCH/genspark_search.py +324 -0
- webscout/Provider/AISEARCH/hika_search.py +186 -0
- webscout/Provider/AISEARCH/iask_search.py +410 -0
- webscout/Provider/AISEARCH/monica_search.py +220 -0
- webscout/Provider/AISEARCH/scira_search.py +298 -0
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -0
- webscout/Provider/Aitopia.py +316 -0
- webscout/Provider/AllenAI.py +440 -0
- webscout/Provider/Andi.py +228 -0
- webscout/Provider/Blackboxai.py +791 -0
- webscout/Provider/ChatGPTClone.py +237 -0
- webscout/Provider/ChatGPTGratis.py +194 -0
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +324 -0
- webscout/Provider/Cohere.py +208 -0
- webscout/Provider/Deepinfra.py +340 -0
- webscout/Provider/ExaAI.py +261 -0
- webscout/Provider/ExaChat.py +358 -0
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/Gemini.py +169 -0
- webscout/Provider/GithubChat.py +369 -0
- webscout/Provider/GizAI.py +295 -0
- webscout/Provider/Glider.py +225 -0
- webscout/Provider/Groq.py +801 -0
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +206 -0
- webscout/Provider/HeckAI.py +375 -0
- webscout/Provider/HuggingFaceChat.py +469 -0
- webscout/Provider/Hunyuan.py +283 -0
- webscout/Provider/Jadve.py +291 -0
- webscout/Provider/Koboldai.py +384 -0
- webscout/Provider/LambdaChat.py +411 -0
- webscout/Provider/Llama3.py +259 -0
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +198 -0
- webscout/Provider/Nemotron.py +218 -0
- webscout/Provider/Netwrck.py +270 -0
- webscout/Provider/OLLAMA.py +396 -0
- webscout/Provider/OPENAI/BLACKBOXAI.py +766 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +283 -0
- webscout/Provider/OPENAI/NEMOTRON.py +232 -0
- webscout/Provider/OPENAI/Qwen3.py +283 -0
- webscout/Provider/OPENAI/README.md +952 -0
- webscout/Provider/OPENAI/TwoAI.py +357 -0
- webscout/Provider/OPENAI/__init__.py +40 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -0
- webscout/Provider/OPENAI/api.py +969 -0
- webscout/Provider/OPENAI/base.py +249 -0
- webscout/Provider/OPENAI/c4ai.py +373 -0
- webscout/Provider/OPENAI/chatgpt.py +556 -0
- webscout/Provider/OPENAI/chatgptclone.py +494 -0
- webscout/Provider/OPENAI/chatsandbox.py +173 -0
- webscout/Provider/OPENAI/copilot.py +242 -0
- webscout/Provider/OPENAI/deepinfra.py +322 -0
- webscout/Provider/OPENAI/e2b.py +1414 -0
- webscout/Provider/OPENAI/exaai.py +417 -0
- webscout/Provider/OPENAI/exachat.py +444 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -0
- webscout/Provider/OPENAI/glider.py +326 -0
- webscout/Provider/OPENAI/groq.py +364 -0
- webscout/Provider/OPENAI/heckai.py +308 -0
- webscout/Provider/OPENAI/llmchatco.py +335 -0
- webscout/Provider/OPENAI/mcpcore.py +389 -0
- webscout/Provider/OPENAI/multichat.py +376 -0
- webscout/Provider/OPENAI/netwrck.py +357 -0
- webscout/Provider/OPENAI/oivscode.py +287 -0
- webscout/Provider/OPENAI/opkfc.py +496 -0
- webscout/Provider/OPENAI/pydantic_imports.py +172 -0
- webscout/Provider/OPENAI/scirachat.py +477 -0
- webscout/Provider/OPENAI/sonus.py +304 -0
- webscout/Provider/OPENAI/standardinput.py +433 -0
- webscout/Provider/OPENAI/textpollinations.py +339 -0
- webscout/Provider/OPENAI/toolbaz.py +413 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +364 -0
- webscout/Provider/OPENAI/uncovrAI.py +463 -0
- webscout/Provider/OPENAI/utils.py +318 -0
- webscout/Provider/OPENAI/venice.py +431 -0
- webscout/Provider/OPENAI/wisecat.py +387 -0
- webscout/Provider/OPENAI/writecream.py +163 -0
- webscout/Provider/OPENAI/x0gpt.py +365 -0
- webscout/Provider/OPENAI/yep.py +382 -0
- webscout/Provider/OpenGPT.py +209 -0
- webscout/Provider/Openai.py +496 -0
- webscout/Provider/PI.py +429 -0
- webscout/Provider/Perplexitylabs.py +415 -0
- webscout/Provider/QwenLM.py +254 -0
- webscout/Provider/Reka.py +214 -0
- webscout/Provider/StandardInput.py +290 -0
- webscout/Provider/TTI/README.md +82 -0
- webscout/Provider/TTI/__init__.py +7 -0
- webscout/Provider/TTI/aiarta.py +365 -0
- webscout/Provider/TTI/artbit.py +0 -0
- webscout/Provider/TTI/base.py +64 -0
- webscout/Provider/TTI/fastflux.py +200 -0
- webscout/Provider/TTI/magicstudio.py +201 -0
- webscout/Provider/TTI/piclumen.py +203 -0
- webscout/Provider/TTI/pixelmuse.py +225 -0
- webscout/Provider/TTI/pollinations.py +221 -0
- webscout/Provider/TTI/utils.py +11 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +10 -0
- webscout/Provider/TTS/base.py +159 -0
- webscout/Provider/TTS/deepgram.py +156 -0
- webscout/Provider/TTS/elevenlabs.py +111 -0
- webscout/Provider/TTS/gesserit.py +128 -0
- webscout/Provider/TTS/murfai.py +113 -0
- webscout/Provider/TTS/openai_fm.py +129 -0
- webscout/Provider/TTS/parler.py +111 -0
- webscout/Provider/TTS/speechma.py +580 -0
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TTS/streamElements.py +333 -0
- webscout/Provider/TTS/utils.py +280 -0
- webscout/Provider/TeachAnything.py +229 -0
- webscout/Provider/TextPollinationsAI.py +308 -0
- webscout/Provider/TwoAI.py +475 -0
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/UNFINISHED/ChatHub.py +209 -0
- webscout/Provider/UNFINISHED/Youchat.py +330 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/puterjs.py +635 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Venice.py +258 -0
- webscout/Provider/VercelAI.py +253 -0
- webscout/Provider/WiseCat.py +233 -0
- webscout/Provider/WrDoChat.py +370 -0
- webscout/Provider/Writecream.py +246 -0
- webscout/Provider/WritingMate.py +269 -0
- webscout/Provider/__init__.py +174 -0
- webscout/Provider/ai4chat.py +174 -0
- webscout/Provider/akashgpt.py +335 -0
- webscout/Provider/asksteve.py +220 -0
- webscout/Provider/cerebras.py +290 -0
- webscout/Provider/chatglm.py +215 -0
- webscout/Provider/cleeai.py +213 -0
- webscout/Provider/copilot.py +425 -0
- webscout/Provider/elmo.py +283 -0
- webscout/Provider/freeaichat.py +285 -0
- webscout/Provider/geminiapi.py +208 -0
- webscout/Provider/granite.py +235 -0
- webscout/Provider/hermes.py +266 -0
- webscout/Provider/julius.py +223 -0
- webscout/Provider/koala.py +170 -0
- webscout/Provider/learnfastai.py +325 -0
- webscout/Provider/llama3mitril.py +215 -0
- webscout/Provider/llmchat.py +258 -0
- webscout/Provider/llmchatco.py +306 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +801 -0
- webscout/Provider/multichat.py +364 -0
- webscout/Provider/oivscode.py +309 -0
- webscout/Provider/samurai.py +224 -0
- webscout/Provider/scira_chat.py +299 -0
- webscout/Provider/scnet.py +243 -0
- webscout/Provider/searchchat.py +292 -0
- webscout/Provider/sonus.py +258 -0
- webscout/Provider/talkai.py +194 -0
- webscout/Provider/toolbaz.py +353 -0
- webscout/Provider/turboseek.py +266 -0
- webscout/Provider/typefully.py +202 -0
- webscout/Provider/typegpt.py +289 -0
- webscout/Provider/uncovr.py +368 -0
- webscout/Provider/x0gpt.py +299 -0
- webscout/Provider/yep.py +389 -0
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/client.py +70 -0
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/litagent/__init__.py +29 -0
- webscout/litagent/agent.py +455 -0
- webscout/litagent/constants.py +60 -0
- webscout/litprinter/__init__.py +59 -0
- webscout/optimizers.py +419 -419
- webscout/scout/README.md +404 -0
- webscout/scout/__init__.py +8 -0
- webscout/scout/core/__init__.py +7 -0
- webscout/scout/core/crawler.py +210 -0
- webscout/scout/core/scout.py +607 -0
- webscout/scout/core/search_result.py +96 -0
- webscout/scout/core/text_analyzer.py +63 -0
- webscout/scout/core/text_utils.py +277 -0
- webscout/scout/core/web_analyzer.py +52 -0
- webscout/scout/element.py +478 -0
- webscout/scout/parsers/__init__.py +69 -0
- webscout/scout/parsers/html5lib_parser.py +172 -0
- webscout/scout/parsers/html_parser.py +236 -0
- webscout/scout/parsers/lxml_parser.py +178 -0
- webscout/scout/utils.py +37 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/swiftcli/__init__.py +95 -0
- webscout/swiftcli/core/__init__.py +7 -0
- webscout/swiftcli/core/cli.py +297 -0
- webscout/swiftcli/core/context.py +104 -0
- webscout/swiftcli/core/group.py +241 -0
- webscout/swiftcli/decorators/__init__.py +28 -0
- webscout/swiftcli/decorators/command.py +221 -0
- webscout/swiftcli/decorators/options.py +220 -0
- webscout/swiftcli/decorators/output.py +252 -0
- webscout/swiftcli/exceptions.py +21 -0
- webscout/swiftcli/plugins/__init__.py +9 -0
- webscout/swiftcli/plugins/base.py +135 -0
- webscout/swiftcli/plugins/manager.py +269 -0
- webscout/swiftcli/utils/__init__.py +59 -0
- webscout/swiftcli/utils/formatting.py +252 -0
- webscout/swiftcli/utils/parsing.py +267 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +135 -0
- webscout/zeroart/base.py +66 -0
- webscout/zeroart/effects.py +101 -0
- webscout/zeroart/fonts.py +1239 -0
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/METADATA +262 -83
- webscout-8.2.9.dist-info/RECORD +289 -0
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
- webscout-8.2.7.dist-info/RECORD +0 -26
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,333 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import random
|
|
3
|
+
from uuid import uuid4
|
|
4
|
+
from typing import Dict, Optional, Generator, Union, Any
|
|
5
|
+
from curl_cffi import requests
|
|
6
|
+
|
|
7
|
+
from webscout.AIbase import AISearch, SearchResponse
|
|
8
|
+
from webscout import exceptions
|
|
9
|
+
from webscout.litagent import LitAgent
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class Perplexity(AISearch):
|
|
13
|
+
"""A class to interact with the Perplexity AI search API.
|
|
14
|
+
|
|
15
|
+
Perplexity provides a powerful search interface that returns AI-generated responses
|
|
16
|
+
based on web content. It supports both streaming and non-streaming responses,
|
|
17
|
+
multiple search modes, and model selection.
|
|
18
|
+
|
|
19
|
+
Basic Usage:
|
|
20
|
+
>>> from webscout import Perplexity
|
|
21
|
+
>>> ai = Perplexity()
|
|
22
|
+
>>> # Non-streaming example
|
|
23
|
+
>>> response = ai.search("What is Python?")
|
|
24
|
+
>>> print(response)
|
|
25
|
+
Python is a high-level programming language...
|
|
26
|
+
|
|
27
|
+
>>> # Streaming example
|
|
28
|
+
>>> for chunk in ai.search("Tell me about AI", stream=True):
|
|
29
|
+
... print(chunk, end="", flush=True)
|
|
30
|
+
Artificial Intelligence is...
|
|
31
|
+
|
|
32
|
+
>>> # Pro search with specific model (requires authentication via cookies)
|
|
33
|
+
>>> cookies = {"perplexity-user": "your_cookie_value"}
|
|
34
|
+
>>> ai_pro = Perplexity(cookies=cookies)
|
|
35
|
+
>>> response = ai_pro.search("Latest AI research", mode="pro", model="gpt-4o")
|
|
36
|
+
>>> print(response)
|
|
37
|
+
|
|
38
|
+
>>> # Raw response format
|
|
39
|
+
>>> for chunk in ai.search("Hello", stream=True, raw=True):
|
|
40
|
+
... print(chunk)
|
|
41
|
+
{'text': 'Hello'}
|
|
42
|
+
{'text': ' there!'}
|
|
43
|
+
|
|
44
|
+
Args:
|
|
45
|
+
cookies (dict, optional): Cookies to use for authentication. Defaults to None.
|
|
46
|
+
timeout (int, optional): Request timeout in seconds. Defaults to 60.
|
|
47
|
+
proxies (dict, optional): Proxy configuration for requests. Defaults to None.
|
|
48
|
+
"""
|
|
49
|
+
|
|
50
|
+
def __init__(
|
|
51
|
+
self,
|
|
52
|
+
cookies: Optional[Dict[str, str]] = None,
|
|
53
|
+
timeout: int = 60,
|
|
54
|
+
proxies: Optional[Dict[str, str]] = None
|
|
55
|
+
):
|
|
56
|
+
"""
|
|
57
|
+
Initialize the Perplexity client.
|
|
58
|
+
|
|
59
|
+
Args:
|
|
60
|
+
cookies (dict, optional): Cookies to use for authentication. Defaults to None.
|
|
61
|
+
timeout (int, optional): Request timeout in seconds. Defaults to 60.
|
|
62
|
+
proxies (dict, optional): Proxy configuration for requests. Defaults to None.
|
|
63
|
+
"""
|
|
64
|
+
self.timeout = timeout
|
|
65
|
+
self.agent = LitAgent()
|
|
66
|
+
self.session = requests.Session(headers={
|
|
67
|
+
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
|
|
68
|
+
'accept-language': 'en-US,en;q=0.9',
|
|
69
|
+
'cache-control': 'max-age=0',
|
|
70
|
+
'dnt': '1',
|
|
71
|
+
'priority': 'u=0, i',
|
|
72
|
+
'sec-ch-ua': '"Not;A=Brand";v="24", "Chromium";v="128"',
|
|
73
|
+
'sec-ch-ua-arch': '"x86"',
|
|
74
|
+
'sec-ch-ua-bitness': '"64"',
|
|
75
|
+
'sec-ch-ua-full-version': '"128.0.6613.120"',
|
|
76
|
+
'sec-ch-ua-full-version-list': '"Not;A=Brand";v="24.0.0.0", "Chromium";v="128.0.6613.120"',
|
|
77
|
+
'sec-ch-ua-mobile': '?0',
|
|
78
|
+
'sec-ch-ua-model': '""',
|
|
79
|
+
'sec-ch-ua-platform': '"Windows"',
|
|
80
|
+
'sec-ch-ua-platform-version': '"19.0.0"',
|
|
81
|
+
'sec-fetch-dest': 'document',
|
|
82
|
+
'sec-fetch-mode': 'navigate',
|
|
83
|
+
'sec-fetch-site': 'same-origin',
|
|
84
|
+
'sec-fetch-user': '?1',
|
|
85
|
+
'upgrade-insecure-requests': '1',
|
|
86
|
+
'user-agent': self.agent.random(),
|
|
87
|
+
}, cookies=cookies or {}, impersonate='chrome')
|
|
88
|
+
|
|
89
|
+
# Apply proxies if provided
|
|
90
|
+
if proxies:
|
|
91
|
+
self.session.proxies.update(proxies)
|
|
92
|
+
|
|
93
|
+
# Initialize session with socket.io
|
|
94
|
+
self.timestamp = format(random.getrandbits(32), '08x')
|
|
95
|
+
|
|
96
|
+
# Get socket.io session ID
|
|
97
|
+
response = self.session.get(f'https://www.perplexity.ai/socket.io/?EIO=4&transport=polling&t={self.timestamp}')
|
|
98
|
+
self.sid = json.loads(response.text[1:])['sid']
|
|
99
|
+
|
|
100
|
+
# Initialize socket.io connection
|
|
101
|
+
assert (self.session.post(
|
|
102
|
+
f'https://www.perplexity.ai/socket.io/?EIO=4&transport=polling&t={self.timestamp}&sid={self.sid}',
|
|
103
|
+
data='40{"jwt":"anonymous-ask-user"}'
|
|
104
|
+
)).text == 'OK'
|
|
105
|
+
|
|
106
|
+
# Get session info
|
|
107
|
+
self.session.get('https://www.perplexity.ai/api/auth/session')
|
|
108
|
+
|
|
109
|
+
# Set default values
|
|
110
|
+
self.copilot = 0 if not cookies else float('inf')
|
|
111
|
+
self.file_upload = 0 if not cookies else float('inf')
|
|
112
|
+
|
|
113
|
+
def _extract_answer(self, response):
|
|
114
|
+
"""
|
|
115
|
+
Extract the answer from the response.
|
|
116
|
+
|
|
117
|
+
Args:
|
|
118
|
+
response (dict): The response from Perplexity AI.
|
|
119
|
+
|
|
120
|
+
Returns:
|
|
121
|
+
str: The extracted answer text.
|
|
122
|
+
"""
|
|
123
|
+
if not response:
|
|
124
|
+
return ""
|
|
125
|
+
|
|
126
|
+
# Find the FINAL step in the text array
|
|
127
|
+
final_step = None
|
|
128
|
+
if 'text' in response and isinstance(response['text'], list):
|
|
129
|
+
for step in response['text']:
|
|
130
|
+
if step.get('step_type') == 'FINAL' and 'content' in step and 'answer' in step['content']:
|
|
131
|
+
final_step = step
|
|
132
|
+
break
|
|
133
|
+
|
|
134
|
+
if not final_step:
|
|
135
|
+
return ""
|
|
136
|
+
|
|
137
|
+
try:
|
|
138
|
+
# Parse the answer JSON string
|
|
139
|
+
answer_json = json.loads(final_step['content']['answer'])
|
|
140
|
+
return answer_json.get('answer', '')
|
|
141
|
+
except (json.JSONDecodeError, KeyError):
|
|
142
|
+
return ""
|
|
143
|
+
|
|
144
|
+
def search(
|
|
145
|
+
self,
|
|
146
|
+
prompt: str,
|
|
147
|
+
mode: str = 'auto',
|
|
148
|
+
model: Optional[str] = None,
|
|
149
|
+
sources: Optional[list] = None,
|
|
150
|
+
stream: bool = False,
|
|
151
|
+
raw: bool = False,
|
|
152
|
+
language: str = 'en-US',
|
|
153
|
+
follow_up: Optional[Dict[str, Any]] = None,
|
|
154
|
+
incognito: bool = False
|
|
155
|
+
) -> Union[SearchResponse, Generator[Union[Dict[str, str], SearchResponse], None, None]]:
|
|
156
|
+
"""Search using the Perplexity API and get AI-generated responses.
|
|
157
|
+
|
|
158
|
+
This method sends a search query to Perplexity and returns the AI-generated response.
|
|
159
|
+
It supports both streaming and non-streaming modes, as well as raw response format.
|
|
160
|
+
|
|
161
|
+
Args:
|
|
162
|
+
prompt (str): The search query or prompt to send to the API.
|
|
163
|
+
mode (str, optional): Search mode. Options: 'auto', 'pro', 'reasoning', 'deep research'.
|
|
164
|
+
Defaults to 'auto'.
|
|
165
|
+
model (str, optional): Model to use. Available models depend on the mode. Defaults to None.
|
|
166
|
+
sources (list, optional): Sources to use. Options: 'web', 'scholar', 'social'.
|
|
167
|
+
Defaults to ['web'].
|
|
168
|
+
stream (bool, optional): If True, yields response chunks as they arrive.
|
|
169
|
+
If False, returns complete response. Defaults to False.
|
|
170
|
+
raw (bool, optional): If True, returns raw response dictionaries.
|
|
171
|
+
If False, returns Response objects that convert to text automatically.
|
|
172
|
+
Defaults to False.
|
|
173
|
+
language (str, optional): Language to use. Defaults to 'en-US'.
|
|
174
|
+
follow_up (dict, optional): Follow-up information. Defaults to None.
|
|
175
|
+
incognito (bool, optional): Whether to use incognito mode. Defaults to False.
|
|
176
|
+
|
|
177
|
+
Returns:
|
|
178
|
+
If stream=True: Generator yielding response chunks as they arrive
|
|
179
|
+
If stream=False: Complete response
|
|
180
|
+
|
|
181
|
+
Raises:
|
|
182
|
+
ValueError: If invalid mode or model is provided
|
|
183
|
+
exceptions.APIConnectionError: If connection to API fails
|
|
184
|
+
exceptions.FailedToGenerateResponseError: If response generation fails
|
|
185
|
+
"""
|
|
186
|
+
if sources is None:
|
|
187
|
+
sources = ['web']
|
|
188
|
+
|
|
189
|
+
# Validate inputs
|
|
190
|
+
if mode not in ['auto', 'pro', 'reasoning', 'deep research']:
|
|
191
|
+
raise ValueError('Search modes -> ["auto", "pro", "reasoning", "deep research"]')
|
|
192
|
+
|
|
193
|
+
if not all([source in ('web', 'scholar', 'social') for source in sources]):
|
|
194
|
+
raise ValueError('Sources -> ["web", "scholar", "social"]')
|
|
195
|
+
|
|
196
|
+
# Check if model is valid for the selected mode
|
|
197
|
+
valid_models = {
|
|
198
|
+
'auto': [None],
|
|
199
|
+
'pro': [None, 'sonar', 'gpt-4.5', 'gpt-4o', 'claude 3.7 sonnet', 'gemini 2.0 flash', 'grok-2'],
|
|
200
|
+
'reasoning': [None, 'r1', 'o3-mini', 'claude 3.7 sonnet'],
|
|
201
|
+
'deep research': [None]
|
|
202
|
+
}
|
|
203
|
+
|
|
204
|
+
if mode in valid_models and model not in valid_models[mode] and model is not None:
|
|
205
|
+
raise ValueError(f'Invalid model for {mode} mode. Valid models: {valid_models[mode]}')
|
|
206
|
+
|
|
207
|
+
# Prepare request data
|
|
208
|
+
json_data = {
|
|
209
|
+
'query_str': prompt,
|
|
210
|
+
'params': {
|
|
211
|
+
'attachments': follow_up['attachments'] if follow_up else [],
|
|
212
|
+
'frontend_context_uuid': str(uuid4()),
|
|
213
|
+
'frontend_uuid': str(uuid4()),
|
|
214
|
+
'is_incognito': incognito,
|
|
215
|
+
'language': language,
|
|
216
|
+
'last_backend_uuid': follow_up['backend_uuid'] if follow_up else None,
|
|
217
|
+
'mode': 'concise' if mode == 'auto' else 'copilot',
|
|
218
|
+
'model_preference': {
|
|
219
|
+
'auto': {
|
|
220
|
+
None: 'turbo'
|
|
221
|
+
},
|
|
222
|
+
'pro': {
|
|
223
|
+
None: 'pplx_pro',
|
|
224
|
+
'sonar': 'experimental',
|
|
225
|
+
'gpt-4.5': 'gpt45',
|
|
226
|
+
'gpt-4o': 'gpt4o',
|
|
227
|
+
'claude 3.7 sonnet': 'claude2',
|
|
228
|
+
'gemini 2.0 flash': 'gemini2flash',
|
|
229
|
+
'grok-2': 'grok'
|
|
230
|
+
},
|
|
231
|
+
'reasoning': {
|
|
232
|
+
None: 'pplx_reasoning',
|
|
233
|
+
'r1': 'r1',
|
|
234
|
+
'o3-mini': 'o3mini',
|
|
235
|
+
'claude 3.7 sonnet': 'claude37sonnetthinking'
|
|
236
|
+
},
|
|
237
|
+
'deep research': {
|
|
238
|
+
None: 'pplx_alpha'
|
|
239
|
+
}
|
|
240
|
+
}[mode][model],
|
|
241
|
+
'source': 'default',
|
|
242
|
+
'sources': sources,
|
|
243
|
+
'version': '2.18'
|
|
244
|
+
}
|
|
245
|
+
}
|
|
246
|
+
|
|
247
|
+
try:
|
|
248
|
+
# Make the request
|
|
249
|
+
resp = self.session.post(
|
|
250
|
+
'https://www.perplexity.ai/rest/sse/perplexity_ask',
|
|
251
|
+
json=json_data,
|
|
252
|
+
stream=True,
|
|
253
|
+
timeout=self.timeout
|
|
254
|
+
)
|
|
255
|
+
|
|
256
|
+
if resp.status_code != 200:
|
|
257
|
+
raise exceptions.APIConnectionError(f"API returned status code {resp.status_code}")
|
|
258
|
+
|
|
259
|
+
# Define streaming response handler
|
|
260
|
+
def stream_response():
|
|
261
|
+
for chunk in resp.iter_lines(delimiter=b'\r\n\r\n'):
|
|
262
|
+
content = chunk.decode('utf-8')
|
|
263
|
+
if content.startswith('event: message\r\n'):
|
|
264
|
+
content_json = json.loads(content[len('event: message\r\ndata: '):])
|
|
265
|
+
if 'text' in content_json:
|
|
266
|
+
try:
|
|
267
|
+
# If text is a string, try to parse it as JSON
|
|
268
|
+
if isinstance(content_json['text'], str):
|
|
269
|
+
content_json['text'] = json.loads(content_json['text'])
|
|
270
|
+
except json.JSONDecodeError:
|
|
271
|
+
pass
|
|
272
|
+
|
|
273
|
+
if raw:
|
|
274
|
+
yield content_json
|
|
275
|
+
else:
|
|
276
|
+
# For non-raw responses, extract text from each chunk
|
|
277
|
+
if 'text' in content_json and isinstance(content_json['text'], list):
|
|
278
|
+
for step in content_json['text']:
|
|
279
|
+
if step.get('type') == 'answer' and 'value' in step:
|
|
280
|
+
yield SearchResponse(step['value'])
|
|
281
|
+
elif step.get('type') == 'thinking' and 'value' in step:
|
|
282
|
+
yield SearchResponse(step['value'])
|
|
283
|
+
elif content.startswith('event: end_of_stream\r\n'):
|
|
284
|
+
return
|
|
285
|
+
|
|
286
|
+
# Handle streaming or non-streaming response
|
|
287
|
+
if stream:
|
|
288
|
+
return stream_response()
|
|
289
|
+
else:
|
|
290
|
+
chunks = []
|
|
291
|
+
final_response = None
|
|
292
|
+
|
|
293
|
+
for chunk in resp.iter_lines(delimiter=b'\r\n\r\n'):
|
|
294
|
+
content = chunk.decode('utf-8')
|
|
295
|
+
if content.startswith('event: message\r\n'):
|
|
296
|
+
content_json = json.loads(content[len('event: message\r\ndata: '):])
|
|
297
|
+
if 'text' in content_json:
|
|
298
|
+
try:
|
|
299
|
+
# If text is a string, try to parse it as JSON
|
|
300
|
+
if isinstance(content_json['text'], str):
|
|
301
|
+
content_json['text'] = json.loads(content_json['text'])
|
|
302
|
+
except json.JSONDecodeError:
|
|
303
|
+
pass
|
|
304
|
+
chunks.append(content_json)
|
|
305
|
+
final_response = content_json
|
|
306
|
+
elif content.startswith('event: end_of_stream\r\n'):
|
|
307
|
+
# Process the final response to extract the answer
|
|
308
|
+
if final_response:
|
|
309
|
+
answer_text = self._extract_answer(final_response)
|
|
310
|
+
return SearchResponse(answer_text) if not raw else final_response
|
|
311
|
+
elif chunks:
|
|
312
|
+
answer_text = self._extract_answer(chunks[-1])
|
|
313
|
+
return SearchResponse(answer_text) if not raw else chunks[-1]
|
|
314
|
+
else:
|
|
315
|
+
return SearchResponse("") if not raw else {}
|
|
316
|
+
|
|
317
|
+
# If we get here, something went wrong
|
|
318
|
+
raise exceptions.FailedToGenerateResponseError("Failed to get complete response")
|
|
319
|
+
|
|
320
|
+
except requests.RequestsError as e:
|
|
321
|
+
raise exceptions.APIConnectionError(f"Connection error: {str(e)}")
|
|
322
|
+
except json.JSONDecodeError:
|
|
323
|
+
raise exceptions.FailedToGenerateResponseError("Failed to parse response JSON")
|
|
324
|
+
except Exception as e:
|
|
325
|
+
raise exceptions.FailedToGenerateResponseError(f"Error: {str(e)}")
|
|
326
|
+
|
|
327
|
+
|
|
328
|
+
if __name__ == "__main__":
    # Quick manual smoke test: one non-streaming query, printed to stdout.
    client = Perplexity()
    print(client.search("What is Python?"))
|
|
333
|
+
|
|
@@ -0,0 +1,279 @@
|
|
|
1
|
+
<div align="center">
|
|
2
|
+
<h1>🔍 Webscout AI Search Providers</h1>
|
|
3
|
+
<p><strong>Powerful AI-powered search capabilities with multiple provider support</strong></p>
|
|
4
|
+
</div>
|
|
5
|
+
|
|
6
|
+
> [!NOTE]
|
|
7
|
+
> AI Search Providers leverage advanced language models and search algorithms to deliver high-quality, context-aware responses with web search integration.
|
|
8
|
+
|
|
9
|
+
## ✨ Features
|
|
10
|
+
|
|
11
|
+
- **Multiple Search Providers**: Support for 7+ specialized AI search services
|
|
12
|
+
- **Streaming Responses**: Real-time streaming of AI-generated responses
|
|
13
|
+
- **Raw Response Format**: Access to raw response data when needed
|
|
14
|
+
- **Automatic Text Handling**: Smart response formatting and cleaning
|
|
15
|
+
- **Robust Error Handling**: Comprehensive error management
|
|
16
|
+
- **Cross-Platform Compatibility**: Works seamlessly across different environments
|
|
17
|
+
|
|
18
|
+
## 📦 Supported Search Providers
|
|
19
|
+
|
|
20
|
+
| Provider | Description | Key Features |
|
|
21
|
+
|----------|-------------|-------------|
|
|
22
|
+
| **DeepFind** | General purpose AI search | Web-based, reference removal, clean formatting |
|
|
23
|
+
| **Felo** | Fast streaming search | Advanced capabilities, real-time streaming |
|
|
24
|
+
| **Isou** | Scientific search | Multiple model selection, citation handling |
|
|
25
|
+
| **Genspark** | Efficient search | Fast response, markdown link removal |
|
|
26
|
+
| **Monica** | Comprehensive search | Related question suggestions, source references |
|
|
27
|
+
| **WebPilotAI** | Web-integrated search | Web page analysis, content extraction |
|
|
28
|
+
| **Scira** | Research-focused search | Multiple models (Grok3, Claude), vision support |
|
|
29
|
+
| **IAsk** | Multi-mode search | Question, Academic, Fast modes, detail levels |
|
|
30
|
+
| **Hika** | General AI search | Simple interface, clean text output |
|
|
31
|
+
| **Perplexity** | Advanced AI search & chat | Multiple modes (Pro, Reasoning), model selection, source control |
|
|
32
|
+
|
|
33
|
+
## 🚀 Installation
|
|
34
|
+
|
|
35
|
+
```bash
|
|
36
|
+
pip install -U webscout
|
|
37
|
+
```
|
|
38
|
+
|
|
39
|
+
## 💻 Quick Start Guide
|
|
40
|
+
|
|
41
|
+
### Basic Usage Pattern
|
|
42
|
+
|
|
43
|
+
All AI Search providers follow a consistent usage pattern:
|
|
44
|
+
|
|
45
|
+
```python
|
|
46
|
+
from webscout import ProviderName
|
|
47
|
+
|
|
48
|
+
# Initialize the provider
|
|
49
|
+
ai = ProviderName()
|
|
50
|
+
|
|
51
|
+
# Basic search
|
|
52
|
+
response = ai.search("Your query here")
|
|
53
|
+
print(response) # Automatically formats the response
|
|
54
|
+
|
|
55
|
+
# Streaming search
|
|
56
|
+
for chunk in ai.search("Your query here", stream=True):
|
|
57
|
+
print(chunk, end="", flush=True) # Print response as it arrives
|
|
58
|
+
```
|
|
59
|
+
|
|
60
|
+
### Provider Examples
|
|
61
|
+
|
|
62
|
+
<details>
|
|
63
|
+
<summary><strong>DeepFind Example</strong></summary>
|
|
64
|
+
|
|
65
|
+
```python
|
|
66
|
+
from webscout import DeepFind
|
|
67
|
+
|
|
68
|
+
# Initialize the search provider
|
|
69
|
+
ai = DeepFind()
|
|
70
|
+
|
|
71
|
+
# Basic search
|
|
72
|
+
response = ai.search("What is Python?")
|
|
73
|
+
print(response)
|
|
74
|
+
|
|
75
|
+
# Streaming search
|
|
76
|
+
for chunk in ai.search("Tell me about AI", stream=True):
|
|
77
|
+
print(chunk, end="")
|
|
78
|
+
```
|
|
79
|
+
</details>
|
|
80
|
+
|
|
81
|
+
<details>
|
|
82
|
+
<summary><strong>Scira Example</strong></summary>
|
|
83
|
+
|
|
84
|
+
```python
|
|
85
|
+
from webscout import Scira
|
|
86
|
+
|
|
87
|
+
# Initialize with default model (Grok3)
|
|
88
|
+
ai = Scira()
|
|
89
|
+
|
|
90
|
+
# Basic search
|
|
91
|
+
response = ai.search("What is the impact of climate change?")
|
|
92
|
+
print(response)
|
|
93
|
+
|
|
94
|
+
# Streaming search with Claude model
|
|
95
|
+
ai = Scira(model="scira-claude")
|
|
96
|
+
for chunk in ai.search("Explain quantum computing", stream=True):
|
|
97
|
+
print(chunk, end="", flush=True)
|
|
98
|
+
|
|
99
|
+
# Available models:
|
|
100
|
+
# - scira-default (Grok3)
|
|
101
|
+
# - scira-grok-3-mini (Grok3-mini)
|
|
102
|
+
# - scira-vision (Grok2-Vision)
|
|
103
|
+
# - scira-claude (Sonnet-3.7)
|
|
104
|
+
# - scira-optimus (optimus)
|
|
105
|
+
```
|
|
106
|
+
</details>
|
|
107
|
+
|
|
108
|
+
<details>
|
|
109
|
+
<summary><strong>Isou Example</strong></summary>
|
|
110
|
+
|
|
111
|
+
```python
|
|
112
|
+
from webscout import Isou
|
|
113
|
+
|
|
114
|
+
# Initialize with specific model
|
|
115
|
+
ai = Isou(model="siliconflow:deepseek-ai/DeepSeek-R1-Distill-Qwen-32B")
|
|
116
|
+
|
|
117
|
+
# Get a response with scientific information
|
|
118
|
+
response = ai.search("Explain the double-slit experiment")
|
|
119
|
+
print(response)
|
|
120
|
+
```
|
|
121
|
+
</details>
|
|
122
|
+
|
|
123
|
+
<details>
|
|
124
|
+
<summary><strong>Perplexity Example</strong></summary>
|
|
125
|
+
|
|
126
|
+
```python
|
|
127
|
+
from webscout import Perplexity
|
|
128
|
+
|
|
129
|
+
# Initialize (optionally pass cookies for authenticated features)
|
|
130
|
+
# cookies = {"perplexity-user": "your_cookie_value"}
|
|
131
|
+
# ai = Perplexity(cookies=cookies)
|
|
132
|
+
ai = Perplexity() # Anonymous access
|
|
133
|
+
|
|
134
|
+
# Basic search (auto mode)
|
|
135
|
+
response = ai.search("What is the weather in London?")
|
|
136
|
+
print(response)
|
|
137
|
+
|
|
138
|
+
# Streaming search
|
|
139
|
+
for chunk in ai.search("Explain black holes", stream=True):
|
|
140
|
+
print(chunk, end="", flush=True)
|
|
141
|
+
|
|
142
|
+
# Pro search with specific model (requires authentication via cookies)
|
|
143
|
+
# try:
|
|
144
|
+
# ai_pro = Perplexity(cookies=your_cookies)
|
|
145
|
+
# response_pro = ai_pro.search("Latest AI research papers", mode='pro', model='gpt-4o', sources=['scholar'])
|
|
146
|
+
# print(response_pro)
|
|
147
|
+
# except Exception as e:
|
|
148
|
+
# print(f"Pro search failed: {e}")
|
|
149
|
+
|
|
150
|
+
# Available modes: 'auto', 'pro', 'reasoning', 'deep research'
|
|
151
|
+
# Available sources: 'web', 'scholar', 'social'
|
|
152
|
+
# Models depend on the mode selected.
|
|
153
|
+
```
|
|
154
|
+
</details>
|
|
155
|
+
|
|
156
|
+
## 🎛️ Advanced Configuration
|
|
157
|
+
|
|
158
|
+
<details>
|
|
159
|
+
<summary><strong>Timeout and Proxy Settings</strong></summary>
|
|
160
|
+
|
|
161
|
+
```python
|
|
162
|
+
# Configure timeout
|
|
163
|
+
ai = DeepFind(timeout=60)  # 60-second timeout
|
|
164
|
+
|
|
165
|
+
# Use with proxy
|
|
166
|
+
proxies = {'http': 'http://proxy.com:8080'}
|
|
167
|
+
ai = Felo(proxies=proxies)
|
|
168
|
+
|
|
169
|
+
# Configure max tokens (for providers that support it)
|
|
170
|
+
ai = Genspark(max_tokens=800)
|
|
171
|
+
|
|
172
|
+
# Configure model and group for Scira
|
|
173
|
+
ai = Scira(model="scira-claude", group="web")
|
|
174
|
+
```
|
|
175
|
+
</details>
|
|
176
|
+
|
|
177
|
+
<details>
|
|
178
|
+
<summary><strong>Response Formats</strong></summary>
|
|
179
|
+
|
|
180
|
+
```python
|
|
181
|
+
# Get raw response format
|
|
182
|
+
response = ai.search("Hello", stream=True, raw=True)
|
|
183
|
+
# Output: {'text': 'Hello'}, {'text': ' there!'}, etc.
|
|
184
|
+
|
|
185
|
+
# Get formatted text response
|
|
186
|
+
response = ai.search("Hello", stream=True)
|
|
187
|
+
# Output: Hello there!
|
|
188
|
+
```
|
|
189
|
+
</details>
|
|
190
|
+
|
|
191
|
+
## 🔧 Provider Capabilities
|
|
192
|
+
|
|
193
|
+
| Provider | Key Capabilities | Technical Details |
|
|
194
|
+
|----------|-----------------|-------------------|
|
|
195
|
+
| **DeepFind** | • Web-based AI search<br>• Automatic reference removal<br>• Clean response formatting | • Streaming support with progress tracking<br>• JSON response parsing<br>• Error handling |
|
|
196
|
+
| **Felo** | • Advanced search capabilities<br>• Real-time response streaming<br>• JSON-based response parsing | • Automatic text cleaning<br>• Session management<br>• Rate limiting support |
|
|
197
|
+
| **Isou** | • Multiple model selection<br>• Scientific and general category support<br>• Citation handling | • Deep and simple search modes<br>• Specialized model options<br>• Markdown formatting |
|
|
198
|
+
| **Genspark** | • Fast response generation<br>• Markdown link removal<br>• JSON structure normalization | • Session-based API interactions<br>• Efficient content parsing<br>• Streaming optimization |
|
|
199
|
+
| **Monica** | • Comprehensive search responses<br>• Related question suggestions<br>• Source references | • Answer snippets<br>• Clean formatted responses<br>• Web content integration |
|
|
200
|
+
| **WebPilotAI** | • Web page analysis<br>• Content extraction<br>• Structured data retrieval | • URL processing<br>• HTML parsing<br>• Metadata extraction |
|
|
201
|
+
| **Scira** | • Research-focused search<br>• Multiple model options<br>• Vision support | • Grok3, Claude, Vision models<br>• Customizable group parameters<br>• Efficient content parsing |
|
|
202
|
+
| **IAsk** | • Multi-mode search (Question, Academic, etc.)<br>• Adjustable detail level<br>• Source citation | • Asynchronous backend (sync wrapper)<br>• WebSocket communication<br>• HTML parsing & formatting |
|
|
203
|
+
| **Hika** | • General AI search<br>• Simple streaming<br>• Basic text cleaning | • SSE streaming<br>• Custom headers for auth<br>• JSON response parsing |
|
|
204
|
+
| **Perplexity** | • Multiple search modes (Pro, Reasoning)<br>• Model selection per mode<br>• Source filtering (web, scholar, social)<br>• Follow-up questions | • `curl_cffi` for Cloudflare bypass<br>• Socket.IO communication<br>• SSE streaming<br>• Requires cookies for Pro features |
|
|
205
|
+
|
|
206
|
+
## 🛡️ Error Handling
|
|
207
|
+
|
|
208
|
+
<details>
|
|
209
|
+
<summary><strong>Exception Handling Example</strong></summary>
|
|
210
|
+
|
|
211
|
+
```python
|
|
212
|
+
from webscout import exceptions
|
|
213
|
+
|
|
214
|
+
try:
|
|
215
|
+
response = ai.search("Your query")
|
|
216
|
+
except exceptions.APIConnectionError as e:
|
|
217
|
+
print(f"API error: {e}")
|
|
218
|
+
except Exception as e:
|
|
219
|
+
print(f"An error occurred: {e}")
|
|
220
|
+
```
|
|
221
|
+
</details>
|
|
222
|
+
|
|
223
|
+
## 📝 Response Handling
|
|
224
|
+
|
|
225
|
+
<details>
|
|
226
|
+
<summary><strong>Working with Response Objects</strong></summary>
|
|
227
|
+
|
|
228
|
+
```python
|
|
229
|
+
# Response objects automatically convert to text
|
|
230
|
+
response = ai.search("What is AI?")
|
|
231
|
+
print(response) # Prints formatted text
|
|
232
|
+
|
|
233
|
+
# Access raw text if needed
|
|
234
|
+
print(response.text)
|
|
235
|
+
```
|
|
236
|
+
</details>
|
|
237
|
+
|
|
238
|
+
## 🔒 Best Practices
|
|
239
|
+
|
|
240
|
+
<details>
|
|
241
|
+
<summary><strong>Streaming for Long Responses</strong></summary>
|
|
242
|
+
|
|
243
|
+
```python
|
|
244
|
+
for chunk in ai.search("Long query", stream=True):
|
|
245
|
+
print(chunk, end="", flush=True)
|
|
246
|
+
```
|
|
247
|
+
</details>
|
|
248
|
+
|
|
249
|
+
<details>
|
|
250
|
+
<summary><strong>Error Handling</strong></summary>
|
|
251
|
+
|
|
252
|
+
```python
|
|
253
|
+
try:
|
|
254
|
+
response = ai.search("Query")
|
|
255
|
+
except exceptions.APIConnectionError:
|
|
256
|
+
# Handle connection errors
|
|
257
|
+
pass
|
|
258
|
+
```
|
|
259
|
+
</details>
|
|
260
|
+
|
|
261
|
+
<details>
|
|
262
|
+
<summary><strong>Provider Selection Guide</strong></summary>
|
|
263
|
+
|
|
264
|
+
| Use Case | Recommended Provider |
|
|
265
|
+
|----------|----------------------|
|
|
266
|
+
| General purpose search | **DeepFind**, **Hika** |
|
|
267
|
+
| Fast streaming responses | **Felo** |
|
|
268
|
+
| Scientific or specialized queries | **Isou**, **Scira** |
|
|
269
|
+
| Clean and efficient responses | **Genspark** |
|
|
270
|
+
| Comprehensive answers with sources | **Monica**, **IAsk** |
|
|
271
|
+
| Web page interaction/analysis | **WebPilotAI** |
|
|
272
|
+
| Advanced control (modes, models) | **Perplexity**, **Scira**, **Isou** |
|
|
273
|
+
| Research-focused | **Scira**, **Isou**, **Perplexity** (with scholar source) |
|
|
274
|
+
|
|
275
|
+
</details>
|
|
276
|
+
|
|
277
|
+
## 🤝 Contributing
|
|
278
|
+
|
|
279
|
+
Contributions are welcome! Please feel free to submit a Pull Request.
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
from .felo_search import *
|
|
2
|
+
from .DeepFind import *
|
|
3
|
+
from .genspark_search import *
|
|
4
|
+
from .monica_search import *
|
|
5
|
+
from .webpilotai_search import *
|
|
6
|
+
from .hika_search import *
|
|
7
|
+
from .scira_search import *
|
|
8
|
+
from .iask_search import *
|
|
9
|
+
from .Perplexity import *
|