webscout 8.2.7__py3-none-any.whl → 8.2.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- webscout/AIauto.py +33 -15
- webscout/AIbase.py +96 -37
- webscout/AIutel.py +703 -250
- webscout/Bard.py +441 -323
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/__init__.py +10 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
- webscout/Extra/GitToolkit/gitapi/user.py +96 -0
- webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/YTdownloader.py +957 -0
- webscout/Extra/YTToolkit/__init__.py +3 -0
- webscout/Extra/YTToolkit/transcriber.py +476 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
- webscout/Extra/YTToolkit/ytapi/https.py +88 -0
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
- webscout/Extra/YTToolkit/ytapi/query.py +40 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
- webscout/Extra/YTToolkit/ytapi/video.py +232 -0
- webscout/Extra/__init__.py +7 -0
- webscout/Extra/autocoder/__init__.py +9 -0
- webscout/Extra/autocoder/autocoder.py +1105 -0
- webscout/Extra/autocoder/autocoder_utiles.py +332 -0
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/gguf.py +684 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/tempmail/__init__.py +28 -0
- webscout/Extra/tempmail/async_utils.py +141 -0
- webscout/Extra/tempmail/base.py +161 -0
- webscout/Extra/tempmail/cli.py +187 -0
- webscout/Extra/tempmail/emailnator.py +84 -0
- webscout/Extra/tempmail/mail_tm.py +361 -0
- webscout/Extra/tempmail/temp_mail_io.py +292 -0
- webscout/Extra/weather.md +281 -0
- webscout/Extra/weather.py +194 -0
- webscout/Extra/weather_ascii.py +76 -0
- webscout/Litlogger/README.md +10 -0
- webscout/Litlogger/__init__.py +15 -0
- webscout/Litlogger/formats.py +4 -0
- webscout/Litlogger/handlers.py +103 -0
- webscout/Litlogger/levels.py +13 -0
- webscout/Litlogger/logger.py +92 -0
- webscout/Provider/AI21.py +177 -0
- webscout/Provider/AISEARCH/DeepFind.py +254 -0
- webscout/Provider/AISEARCH/Perplexity.py +333 -0
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +9 -0
- webscout/Provider/AISEARCH/felo_search.py +202 -0
- webscout/Provider/AISEARCH/genspark_search.py +324 -0
- webscout/Provider/AISEARCH/hika_search.py +186 -0
- webscout/Provider/AISEARCH/iask_search.py +410 -0
- webscout/Provider/AISEARCH/monica_search.py +220 -0
- webscout/Provider/AISEARCH/scira_search.py +298 -0
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -0
- webscout/Provider/Aitopia.py +316 -0
- webscout/Provider/AllenAI.py +440 -0
- webscout/Provider/Andi.py +228 -0
- webscout/Provider/Blackboxai.py +791 -0
- webscout/Provider/ChatGPTClone.py +237 -0
- webscout/Provider/ChatGPTGratis.py +194 -0
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +324 -0
- webscout/Provider/Cohere.py +208 -0
- webscout/Provider/Deepinfra.py +340 -0
- webscout/Provider/ExaAI.py +261 -0
- webscout/Provider/ExaChat.py +358 -0
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/Gemini.py +169 -0
- webscout/Provider/GithubChat.py +369 -0
- webscout/Provider/GizAI.py +295 -0
- webscout/Provider/Glider.py +225 -0
- webscout/Provider/Groq.py +801 -0
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +206 -0
- webscout/Provider/HeckAI.py +375 -0
- webscout/Provider/HuggingFaceChat.py +469 -0
- webscout/Provider/Hunyuan.py +283 -0
- webscout/Provider/Jadve.py +291 -0
- webscout/Provider/Koboldai.py +384 -0
- webscout/Provider/LambdaChat.py +411 -0
- webscout/Provider/Llama3.py +259 -0
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +198 -0
- webscout/Provider/Nemotron.py +218 -0
- webscout/Provider/Netwrck.py +270 -0
- webscout/Provider/OLLAMA.py +396 -0
- webscout/Provider/OPENAI/BLACKBOXAI.py +766 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +283 -0
- webscout/Provider/OPENAI/NEMOTRON.py +232 -0
- webscout/Provider/OPENAI/Qwen3.py +283 -0
- webscout/Provider/OPENAI/README.md +952 -0
- webscout/Provider/OPENAI/TwoAI.py +357 -0
- webscout/Provider/OPENAI/__init__.py +40 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -0
- webscout/Provider/OPENAI/api.py +969 -0
- webscout/Provider/OPENAI/base.py +249 -0
- webscout/Provider/OPENAI/c4ai.py +373 -0
- webscout/Provider/OPENAI/chatgpt.py +556 -0
- webscout/Provider/OPENAI/chatgptclone.py +494 -0
- webscout/Provider/OPENAI/chatsandbox.py +173 -0
- webscout/Provider/OPENAI/copilot.py +242 -0
- webscout/Provider/OPENAI/deepinfra.py +322 -0
- webscout/Provider/OPENAI/e2b.py +1414 -0
- webscout/Provider/OPENAI/exaai.py +417 -0
- webscout/Provider/OPENAI/exachat.py +444 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -0
- webscout/Provider/OPENAI/glider.py +326 -0
- webscout/Provider/OPENAI/groq.py +364 -0
- webscout/Provider/OPENAI/heckai.py +308 -0
- webscout/Provider/OPENAI/llmchatco.py +335 -0
- webscout/Provider/OPENAI/mcpcore.py +389 -0
- webscout/Provider/OPENAI/multichat.py +376 -0
- webscout/Provider/OPENAI/netwrck.py +357 -0
- webscout/Provider/OPENAI/oivscode.py +287 -0
- webscout/Provider/OPENAI/opkfc.py +496 -0
- webscout/Provider/OPENAI/pydantic_imports.py +172 -0
- webscout/Provider/OPENAI/scirachat.py +477 -0
- webscout/Provider/OPENAI/sonus.py +304 -0
- webscout/Provider/OPENAI/standardinput.py +433 -0
- webscout/Provider/OPENAI/textpollinations.py +339 -0
- webscout/Provider/OPENAI/toolbaz.py +413 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +364 -0
- webscout/Provider/OPENAI/uncovrAI.py +463 -0
- webscout/Provider/OPENAI/utils.py +318 -0
- webscout/Provider/OPENAI/venice.py +431 -0
- webscout/Provider/OPENAI/wisecat.py +387 -0
- webscout/Provider/OPENAI/writecream.py +163 -0
- webscout/Provider/OPENAI/x0gpt.py +365 -0
- webscout/Provider/OPENAI/yep.py +382 -0
- webscout/Provider/OpenGPT.py +209 -0
- webscout/Provider/Openai.py +496 -0
- webscout/Provider/PI.py +429 -0
- webscout/Provider/Perplexitylabs.py +415 -0
- webscout/Provider/QwenLM.py +254 -0
- webscout/Provider/Reka.py +214 -0
- webscout/Provider/StandardInput.py +290 -0
- webscout/Provider/TTI/README.md +82 -0
- webscout/Provider/TTI/__init__.py +7 -0
- webscout/Provider/TTI/aiarta.py +365 -0
- webscout/Provider/TTI/artbit.py +0 -0
- webscout/Provider/TTI/base.py +64 -0
- webscout/Provider/TTI/fastflux.py +200 -0
- webscout/Provider/TTI/magicstudio.py +201 -0
- webscout/Provider/TTI/piclumen.py +203 -0
- webscout/Provider/TTI/pixelmuse.py +225 -0
- webscout/Provider/TTI/pollinations.py +221 -0
- webscout/Provider/TTI/utils.py +11 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +10 -0
- webscout/Provider/TTS/base.py +159 -0
- webscout/Provider/TTS/deepgram.py +156 -0
- webscout/Provider/TTS/elevenlabs.py +111 -0
- webscout/Provider/TTS/gesserit.py +128 -0
- webscout/Provider/TTS/murfai.py +113 -0
- webscout/Provider/TTS/openai_fm.py +129 -0
- webscout/Provider/TTS/parler.py +111 -0
- webscout/Provider/TTS/speechma.py +580 -0
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TTS/streamElements.py +333 -0
- webscout/Provider/TTS/utils.py +280 -0
- webscout/Provider/TeachAnything.py +229 -0
- webscout/Provider/TextPollinationsAI.py +308 -0
- webscout/Provider/TwoAI.py +475 -0
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/UNFINISHED/ChatHub.py +209 -0
- webscout/Provider/UNFINISHED/Youchat.py +330 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/puterjs.py +635 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Venice.py +258 -0
- webscout/Provider/VercelAI.py +253 -0
- webscout/Provider/WiseCat.py +233 -0
- webscout/Provider/WrDoChat.py +370 -0
- webscout/Provider/Writecream.py +246 -0
- webscout/Provider/WritingMate.py +269 -0
- webscout/Provider/__init__.py +174 -0
- webscout/Provider/ai4chat.py +174 -0
- webscout/Provider/akashgpt.py +335 -0
- webscout/Provider/asksteve.py +220 -0
- webscout/Provider/cerebras.py +290 -0
- webscout/Provider/chatglm.py +215 -0
- webscout/Provider/cleeai.py +213 -0
- webscout/Provider/copilot.py +425 -0
- webscout/Provider/elmo.py +283 -0
- webscout/Provider/freeaichat.py +285 -0
- webscout/Provider/geminiapi.py +208 -0
- webscout/Provider/granite.py +235 -0
- webscout/Provider/hermes.py +266 -0
- webscout/Provider/julius.py +223 -0
- webscout/Provider/koala.py +170 -0
- webscout/Provider/learnfastai.py +325 -0
- webscout/Provider/llama3mitril.py +215 -0
- webscout/Provider/llmchat.py +258 -0
- webscout/Provider/llmchatco.py +306 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +801 -0
- webscout/Provider/multichat.py +364 -0
- webscout/Provider/oivscode.py +309 -0
- webscout/Provider/samurai.py +224 -0
- webscout/Provider/scira_chat.py +299 -0
- webscout/Provider/scnet.py +243 -0
- webscout/Provider/searchchat.py +292 -0
- webscout/Provider/sonus.py +258 -0
- webscout/Provider/talkai.py +194 -0
- webscout/Provider/toolbaz.py +353 -0
- webscout/Provider/turboseek.py +266 -0
- webscout/Provider/typefully.py +202 -0
- webscout/Provider/typegpt.py +289 -0
- webscout/Provider/uncovr.py +368 -0
- webscout/Provider/x0gpt.py +299 -0
- webscout/Provider/yep.py +389 -0
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/client.py +70 -0
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/litagent/__init__.py +29 -0
- webscout/litagent/agent.py +455 -0
- webscout/litagent/constants.py +60 -0
- webscout/litprinter/__init__.py +59 -0
- webscout/optimizers.py +419 -419
- webscout/scout/README.md +404 -0
- webscout/scout/__init__.py +8 -0
- webscout/scout/core/__init__.py +7 -0
- webscout/scout/core/crawler.py +210 -0
- webscout/scout/core/scout.py +607 -0
- webscout/scout/core/search_result.py +96 -0
- webscout/scout/core/text_analyzer.py +63 -0
- webscout/scout/core/text_utils.py +277 -0
- webscout/scout/core/web_analyzer.py +52 -0
- webscout/scout/element.py +478 -0
- webscout/scout/parsers/__init__.py +69 -0
- webscout/scout/parsers/html5lib_parser.py +172 -0
- webscout/scout/parsers/html_parser.py +236 -0
- webscout/scout/parsers/lxml_parser.py +178 -0
- webscout/scout/utils.py +37 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/swiftcli/__init__.py +95 -0
- webscout/swiftcli/core/__init__.py +7 -0
- webscout/swiftcli/core/cli.py +297 -0
- webscout/swiftcli/core/context.py +104 -0
- webscout/swiftcli/core/group.py +241 -0
- webscout/swiftcli/decorators/__init__.py +28 -0
- webscout/swiftcli/decorators/command.py +221 -0
- webscout/swiftcli/decorators/options.py +220 -0
- webscout/swiftcli/decorators/output.py +252 -0
- webscout/swiftcli/exceptions.py +21 -0
- webscout/swiftcli/plugins/__init__.py +9 -0
- webscout/swiftcli/plugins/base.py +135 -0
- webscout/swiftcli/plugins/manager.py +269 -0
- webscout/swiftcli/utils/__init__.py +59 -0
- webscout/swiftcli/utils/formatting.py +252 -0
- webscout/swiftcli/utils/parsing.py +267 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +135 -0
- webscout/zeroart/base.py +66 -0
- webscout/zeroart/effects.py +101 -0
- webscout/zeroart/fonts.py +1239 -0
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/METADATA +262 -83
- webscout-8.2.9.dist-info/RECORD +289 -0
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
- webscout-8.2.7.dist-info/RECORD +0 -26
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,255 @@
|
|
|
1
|
+
import requests
|
|
2
|
+
import json
|
|
3
|
+
import re
|
|
4
|
+
from typing import Dict, Optional, Generator, Union, Any
|
|
5
|
+
|
|
6
|
+
from webscout.AIbase import AISearch, SearchResponse
|
|
7
|
+
from webscout import exceptions
|
|
8
|
+
from webscout.litagent import LitAgent
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class webpilotai(AISearch):
    """A class to interact with the webpilotai (WebPilot) AI search API.

    webpilotai provides a web-based comprehensive search SearchResponse interface that returns AI-generated
    SearchResponses with source references and related questions. It supports both streaming and
    non-streaming SearchResponses.

    Basic Usage:
        >>> from webscout import webpilotai
        >>> ai = webpilotai()
        >>> # Non-streaming example
        >>> response = ai.search("What is Python?")
        >>> print(response)
        Python is a high-level programming language...

        >>> # Streaming example
        >>> for chunk in ai.search("Tell me about AI", stream=True):
        ...     print(chunk, end="", flush=True)
        Artificial Intelligence is...

        >>> # Raw SearchResponse format
        >>> for chunk in ai.search("Hello", stream=True, raw=True):
        ...     print(chunk)
        {'text': 'Hello'}
        {'text': ' there!'}

    Args:
        timeout (int, optional): Request timeout in seconds. Defaults to 90.
        proxies (dict, optional): Proxy configuration for requests. Defaults to None.
    """

    def __init__(
        self,
        timeout: int = 90,
        proxies: Optional[dict] = None,
    ):
        """Initialize the webpilotai API client.

        Args:
            timeout (int, optional): Request timeout in seconds. Defaults to 90.
            proxies (dict, optional): Proxy configuration for requests. Defaults to None.

        Example:
            >>> ai = webpilotai(timeout=120)  # Longer timeout
            >>> ai = webpilotai(proxies={'http': 'http://proxy.com:8080'})  # With proxy
        """
        self.session = requests.Session()
        self.api_endpoint = "https://api.webpilotai.com/rupee/v1/search"
        self.timeout = timeout
        # Holds the last complete (formatted) non-streaming response.
        self.last_SearchResponse = {}

        # The 'Bearer null' is part of the API's expected headers
        self.headers = {
            'Accept': 'application/json, text/plain, */*, text/event-stream',
            'Content-Type': 'application/json;charset=UTF-8',
            'Authorization': 'Bearer null',
            'Origin': 'https://www.webpilot.ai',
            'Referer': 'https://www.webpilot.ai/',
            'User-Agent': LitAgent().random(),
        }

        self.session.headers.update(self.headers)
        self.proxies = proxies

    def search(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
    ) -> Union[SearchResponse, Generator[Union[Dict[str, str], SearchResponse], None, None]]:
        """Search using the webpilotai API and get AI-generated SearchResponses.

        This method sends a search query to webpilotai and returns the AI-generated SearchResponse.
        It supports both streaming and non-streaming modes, as well as raw SearchResponse format.

        Args:
            prompt (str): The search query or prompt to send to the API.
            stream (bool, optional): If True, yields SearchResponse chunks as they arrive.
                If False, returns complete SearchResponse. Defaults to False.
            raw (bool, optional): If True, returns raw SearchResponse dictionaries with 'text' key.
                If False, returns SearchResponse objects that convert to text automatically.
                Defaults to False.

        Returns:
            Union[SearchResponse, Generator[Union[Dict[str, str], SearchResponse], None, None]]:
                - If stream=False: Returns complete SearchResponse as SearchResponse object
                  (or a list of raw chunk dicts when raw=True)
                - If stream=True: Yields SearchResponse chunks as either Dict or SearchResponse objects

        Raises:
            APIConnectionError: If the API request fails

        Examples:
            Basic search:
            >>> ai = webpilotai()
            >>> response = ai.search("What is Python?")
            >>> print(response)
            Python is a programming language...

            Streaming SearchResponse:
            >>> for chunk in ai.search("Tell me about AI", stream=True):
            ...     print(chunk, end="")
            Artificial Intelligence...

            Raw SearchResponse format:
            >>> for chunk in ai.search("Hello", stream=True, raw=True):
            ...     print(chunk)
            {'text': 'Hello'}
            {'text': ' there!'}
        """
        payload = {
            "q": prompt,
            "threadId": ""  # Empty for new search
        }

        def for_stream():
            full_SearchResponse_content = ""
            current_event_name = None
            current_data_buffer = []

            try:
                with self.session.post(
                    self.api_endpoint,
                    json=payload,
                    stream=True,
                    timeout=self.timeout,
                    proxies=self.proxies
                ) as response:
                    if not response.ok:
                        raise exceptions.APIConnectionError(
                            f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                        )

                    # Process the SSE stream line by line
                    for line in response.iter_lines(decode_unicode=True):
                        if not line:  # Empty line indicates end of an event
                            if current_data_buffer:
                                # Process the completed event
                                full_data = "\n".join(current_data_buffer)
                                if current_event_name == "message":
                                    try:
                                        data_payload = json.loads(full_data)
                                        # Check structure based on the API SearchResponse
                                        if data_payload.get('type') == 'data':
                                            content_chunk = data_payload.get('data', {}).get('content', "")
                                            if content_chunk:
                                                full_SearchResponse_content += content_chunk

                                                # Yield the new content chunk
                                                if raw:
                                                    yield {"text": content_chunk}
                                                else:
                                                    yield SearchResponse(content_chunk)
                                    except json.JSONDecodeError:
                                        pass
                                    except Exception:
                                        # Handle exceptions gracefully in stream processing
                                        pass

                            # Reset for the next event
                            current_event_name = None
                            current_data_buffer = []
                            continue

                        # Parse SSE fields
                        if line.startswith('event:'):
                            current_event_name = line[len('event:'):].strip()
                        elif line.startswith('data:'):
                            data_part = line[len('data:'):]
                            # Remove leading space if present (common in SSE)
                            if data_part.startswith(' '):
                                data_part = data_part[1:]
                            current_data_buffer.append(data_part)

                    # Process any remaining data in buffer if stream ended without blank line
                    if current_data_buffer and current_event_name == "message":
                        try:
                            full_data = "\n".join(current_data_buffer)
                            data_payload = json.loads(full_data)
                            if data_payload.get('type') == 'data':
                                content_chunk = data_payload.get('data', {}).get('content', "")
                                # Trailing event may carry the cumulative text; yield only the delta
                                if content_chunk and len(content_chunk) > len(full_SearchResponse_content):
                                    delta = content_chunk[len(full_SearchResponse_content):]
                                    full_SearchResponse_content += delta

                                    if raw:
                                        yield {"text": delta}
                                    else:
                                        yield SearchResponse(delta)
                        except (json.JSONDecodeError, Exception):
                            pass

            except requests.exceptions.Timeout:
                raise exceptions.APIConnectionError("Request timed out")
            except requests.exceptions.RequestException as e:
                raise exceptions.APIConnectionError(f"Request failed: {e}")

        def for_non_stream():
            # FIX: the previous implementation contained a `yield`, which made this
            # function a generator — its `return` value was silently discarded and
            # non-streaming calls got an (often empty) generator instead of the
            # documented SearchResponse. It also assigned `self.last_response` while
            # returning the never-updated `self.last_SearchResponse`.
            if raw:
                # Preserve raw chunks; a list keeps iteration-based callers working.
                return list(for_stream())

            full_SearchResponse = ""
            for chunk in for_stream():
                full_SearchResponse += str(chunk)

            # Format the SearchResponse for better readability
            formatted_SearchResponse = self.format_SearchResponse(full_SearchResponse)
            self.last_SearchResponse = SearchResponse(formatted_SearchResponse)
            return self.last_SearchResponse

        return for_stream() if stream else for_non_stream()

    @staticmethod
    def format_SearchResponse(text: str) -> str:
        """Format the SearchResponse text for better readability.

        Args:
            text (str): The raw SearchResponse text

        Returns:
            str: Formatted text with improved structure
        """
        # Clean up formatting
        # Remove excessive newlines
        clean_text = re.sub(r'\n{3,}', '\n\n', text)

        # Ensure consistent spacing around sections
        clean_text = re.sub(r'([.!?])\s*\n\s*([A-Z])', r'\1\n\n\2', clean_text)

        # Clean up any leftover HTML or markdown artifacts
        clean_text = re.sub(r'<[^>]*>', '', clean_text)

        # Remove trailing whitespace on each line
        clean_text = '\n'.join(line.rstrip() for line in clean_text.split('\n'))

        return clean_text.strip()
|
|
247
|
+
|
|
248
|
+
|
|
249
|
+
if __name__ == "__main__":
    # Manual smoke test: read one query from stdin and stream the answer.
    from rich import print

    ai = webpilotai()
    # stream=True yields chunks as they arrive; raw=False wraps them in SearchResponse.
    r = ai.search(input(">>> "), stream=True, raw=False)
    for chunk in r:
        print(chunk, end="", flush=True)
|
|
@@ -0,0 +1,316 @@
|
|
|
1
|
+
from curl_cffi import CurlError
|
|
2
|
+
from curl_cffi.requests import Session
|
|
3
|
+
import json
|
|
4
|
+
import uuid
|
|
5
|
+
import time
|
|
6
|
+
import hashlib
|
|
7
|
+
from typing import Any, Dict, Optional, Generator, Union
|
|
8
|
+
|
|
9
|
+
from webscout.AIutel import Optimizers
|
|
10
|
+
from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
|
|
11
|
+
from webscout.AIutel import AwesomePrompts
|
|
12
|
+
from webscout.AIbase import Provider, AsyncProvider
|
|
13
|
+
from webscout import exceptions
|
|
14
|
+
from webscout.litagent import LitAgent
|
|
15
|
+
|
|
16
|
+
class Aitopia(Provider):
    """
    A class to interact with the Aitopia API with LitAgent user-agent.

    Sends chat requests to the extensions.aitopia.ai "/ai/send" endpoint using a
    curl_cffi session with a generated browser fingerprint.
    """

    # Model names accepted by the Aitopia endpoint; validated in __init__.
    AVAILABLE_MODELS = [
        "Claude 3 Haiku",
        "GPT-4o Mini",
        "Gemini 1.5 Flash",
        "Llama 3.1 70B"
    ]
|
|
27
|
+
|
|
28
|
+
def __init__(
    self,
    is_conversation: bool = True,
    max_tokens: int = 2049,
    timeout: int = 30,
    intro: str = None,
    filepath: str = None,
    update_file: bool = True,
    proxies: dict = {},  # NOTE(review): mutable default — harmless here (never mutated), but verify callers
    history_offset: int = 10250,
    act: str = None,
    model: str = "Claude 3 Haiku",
    browser: str = "chrome"
):
    """Initializes the Aitopia API client.

    Args:
        is_conversation (bool): Enable multi-turn conversation history.
        max_tokens (int): Token budget passed to the Conversation helper.
        timeout (int): HTTP request timeout in seconds.
        intro (str): Optional intro prompt for the conversation.
        filepath (str): Optional path for persisting conversation history.
        update_file (bool): Whether to write history updates to `filepath`.
        proxies (dict): Proxy mapping assigned to the curl_cffi session.
        history_offset (int): History truncation offset for the conversation.
        act (str): Optional AwesomePrompts persona key used as the intro.
        model (str): One of AVAILABLE_MODELS.
        browser (str): Browser name used to generate the fingerprint.

    Raises:
        ValueError: If `model` is not in AVAILABLE_MODELS.
    """
    if model not in self.AVAILABLE_MODELS:
        raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

    self.url = "https://extensions.aitopia.ai/ai/send"

    # Initialize LitAgent for user agent generation
    self.agent = LitAgent()
    # Use fingerprinting to create a consistent browser identity
    self.fingerprint = self.agent.generate_fingerprint(browser)

    # Use the fingerprint for headers
    self.headers = {
        "accept": "text/plain",
        "accept-language": self.fingerprint["accept_language"],
        "content-type": "text/plain;charset=UTF-8",
        "dnt": "1",
        "origin": "https://chat.aitopia.ai",
        "priority": "u=1, i",
        "referer": "https://chat.aitopia.ai/",
        # Fall back to a fixed Chromium UA-hint when the fingerprint lacks one
        "sec-ch-ua": self.fingerprint["sec_ch_ua"] or '"Chromium";v="134", "Not:A-Brand";v="24", "Microsoft Edge";v="134"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": f'"{self.fingerprint["platform"]}"',
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-site",
        "user-agent": self.fingerprint["user_agent"]
    }

    self.session = Session()  # Use curl_cffi Session
    self.session.headers.update(self.headers)
    self.session.proxies = proxies  # Assign proxies directly

    self.is_conversation = is_conversation
    self.max_tokens_to_sample = max_tokens
    self.timeout = timeout
    self.last_response = {}
    self.model = model

    # Collect the public callables on Optimizers so `ask` can validate optimizer names.
    self.__available_optimizers = (
        method
        for method in dir(Optimizers)
        if callable(getattr(Optimizers, method)) and not method.startswith("__")
    )
    # NOTE: this mutates the Conversation CLASS attribute, affecting all providers.
    Conversation.intro = (
        AwesomePrompts().get_act(
            act, raise_not_found=True, default=None, case_insensitive=True
        )
        if act
        else intro or Conversation.intro
    )

    self.conversation = Conversation(
        is_conversation, self.max_tokens_to_sample, filepath, update_file
    )
    self.conversation.history_offset = history_offset
|
|
98
|
+
|
|
99
|
+
def refresh_identity(self, browser: str = None):
    """
    Refreshes the browser identity fingerprint.

    Args:
        browser: Specific browser to use for the new fingerprint. Falls back to
            the browser type recorded in the current fingerprint.

    Returns:
        The freshly generated fingerprint dict.
    """
    chosen = browser or self.fingerprint.get("browser_type", "chrome")
    self.fingerprint = self.agent.generate_fingerprint(chosen)

    # Only the fingerprint-derived header fields need refreshing; keep the old
    # sec-ch-ua value when the new fingerprint does not provide one.
    refreshed_fields = {
        "accept-language": self.fingerprint["accept_language"],
        "sec-ch-ua": self.fingerprint["sec_ch_ua"] or self.headers["sec-ch-ua"],
        "sec-ch-ua-platform": f'"{self.fingerprint["platform"]}"',
        "user-agent": self.fingerprint["user_agent"],
    }
    self.headers.update(refreshed_fields)

    # Mirror every header onto the live session so future requests pick them up.
    self.session.headers.update(self.headers)

    return self.fingerprint
|
|
122
|
+
|
|
123
|
+
def generate_uuid_search(self):
    """Return a fresh UUID4 as a 32-character dashless hex string."""
    # uuid4().hex is exactly str(uuid4()) with the dashes removed.
    return uuid.uuid4().hex
|
|
127
|
+
|
|
128
|
+
def generate_hopekey(self):
    """Derive a request key: MD5 hex digest of a uuid4 + current-timestamp seed."""
    seed = f"{uuid.uuid4()}{time.time()}"
    return hashlib.md5(seed.encode()).hexdigest()
|
|
132
|
+
|
|
133
|
+
@staticmethod
def _aitopia_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
    """Extract the incremental text content from one Aitopia stream object."""
    # Non-dict chunks (e.g. plain strings) carry no structured payload.
    if not isinstance(chunk, dict):
        return None
    # Claude 3 Haiku response format: {"delta": {"text": ...}}
    if "delta" in chunk and "text" in chunk["delta"]:
        return chunk["delta"]["text"]
    # GPT-4o Mini response format: {"choices": {"0": {"delta": {"content": ...}}}}
    if "choices" in chunk and "0" in chunk["choices"]:
        return chunk["choices"]["0"]["delta"].get("content")
    # Unknown shape — nothing to extract.
    return None
|
|
145
|
+
|
|
146
|
+
|
|
147
|
+
def ask(
|
|
148
|
+
self,
|
|
149
|
+
prompt: str,
|
|
150
|
+
stream: bool = False,
|
|
151
|
+
raw: bool = False,
|
|
152
|
+
optimizer: str = None,
|
|
153
|
+
conversationally: bool = False,
|
|
154
|
+
) -> Union[Dict[str, Any], Generator]:
|
|
155
|
+
conversation_prompt = self.conversation.gen_complete_prompt(prompt)
|
|
156
|
+
if optimizer:
|
|
157
|
+
if optimizer in self.__available_optimizers:
|
|
158
|
+
conversation_prompt = getattr(Optimizers, optimizer)(
|
|
159
|
+
conversation_prompt if conversationally else prompt
|
|
160
|
+
)
|
|
161
|
+
else:
|
|
162
|
+
raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
|
|
163
|
+
|
|
164
|
+
# Generate hopekey and update headers
|
|
165
|
+
hopekey = self.generate_hopekey()
|
|
166
|
+
self.headers["hopekey"] = hopekey
|
|
167
|
+
|
|
168
|
+
# Default history if none provided
|
|
169
|
+
history = [
|
|
170
|
+
{
|
|
171
|
+
"item": "Hello, how can I help you today?",
|
|
172
|
+
"role": "assistant",
|
|
173
|
+
# "model": "GPT-4o Mini"
|
|
174
|
+
}
|
|
175
|
+
]
|
|
176
|
+
|
|
177
|
+
# Generate current timestamp for chat_id
|
|
178
|
+
current_time = int(time.time() * 1000)
|
|
179
|
+
|
|
180
|
+
# Request payload
|
|
181
|
+
payload = {
|
|
182
|
+
"history": history,
|
|
183
|
+
"text": conversation_prompt,
|
|
184
|
+
"model": self.model,
|
|
185
|
+
"stream": stream,
|
|
186
|
+
"uuid_search": self.generate_uuid_search(),
|
|
187
|
+
"mode": "ai_chat",
|
|
188
|
+
"prompt_mode": False,
|
|
189
|
+
"extra_key": "__all",
|
|
190
|
+
"extra_data": {"prompt_mode": False},
|
|
191
|
+
"chat_id": current_time,
|
|
192
|
+
"language_detail": {
|
|
193
|
+
"lang_code": "en",
|
|
194
|
+
"name": "English",
|
|
195
|
+
"title": "English"
|
|
196
|
+
},
|
|
197
|
+
"is_continue": False,
|
|
198
|
+
"lang_code": "en"
|
|
199
|
+
}
|
|
200
|
+
|
|
201
|
+
def for_stream():
|
|
202
|
+
streaming_text = "" # Initialize outside try block
|
|
203
|
+
try:
|
|
204
|
+
response = self.session.post(
|
|
205
|
+
self.url, headers=self.headers, json=payload, stream=True, timeout=self.timeout,
|
|
206
|
+
impersonate="chrome120" # Add impersonate
|
|
207
|
+
)
|
|
208
|
+
response.raise_for_status()
|
|
209
|
+
|
|
210
|
+
# Use sanitize_stream
|
|
211
|
+
processed_stream = sanitize_stream(
|
|
212
|
+
data=response.iter_content(chunk_size=None), # Pass byte iterator
|
|
213
|
+
intro_value="data:",
|
|
214
|
+
to_json=True, # Stream sends JSON
|
|
215
|
+
skip_markers=["[DONE]"],
|
|
216
|
+
content_extractor=self._aitopia_extractor, # Use the specific extractor
|
|
217
|
+
yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
|
|
218
|
+
)
|
|
219
|
+
|
|
220
|
+
for content_chunk in processed_stream:
|
|
221
|
+
# content_chunk is the string extracted by _aitopia_extractor
|
|
222
|
+
if content_chunk and isinstance(content_chunk, str):
|
|
223
|
+
streaming_text += content_chunk
|
|
224
|
+
resp = dict(text=content_chunk)
|
|
225
|
+
yield resp if not raw else content_chunk
|
|
226
|
+
|
|
227
|
+
except CurlError as e:
|
|
228
|
+
raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
|
|
229
|
+
except Exception as e:
|
|
230
|
+
raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
|
|
231
|
+
finally:
|
|
232
|
+
# Update history after stream finishes or fails
|
|
233
|
+
if streaming_text:
|
|
234
|
+
self.last_response = {"text": streaming_text}
|
|
235
|
+
self.conversation.update_chat_history(prompt, streaming_text)
|
|
236
|
+
|
|
237
|
+
def for_non_stream():
    """Perform a blocking request and return {"text": ...}.

    Raises FailedToGenerateResponseError on transport failure or when the
    response body yields no extractable content.
    """
    try:
        response = self.session.post(
            self.url, headers=self.headers, json=payload, timeout=self.timeout,
            impersonate="chrome120"  # present a real browser TLS fingerprint
        )
        response.raise_for_status()

        response_text_raw = response.text  # raw body

        # Parse the whole body as a single JSON document and extract the
        # OpenAI-style choices[0].message.content field (non-stream responses
        # use the GPT format per the original implementation).
        processed_stream = sanitize_stream(
            data=response_text_raw,
            to_json=True,  # parse the whole text as JSON
            intro_value=None,
            content_extractor=lambda chunk: chunk.get("choices", [{}])[0].get("message", {}).get("content") if isinstance(chunk, dict) else None,
            yield_raw_on_error=False
        )
        # Extract the single result and coerce to str.
        content = next(processed_stream, None)
        content = content if isinstance(content, str) else ""

        if not content:
            raise exceptions.FailedToGenerateResponseError("No response content found or failed to parse")

        self.last_response = {"text": content}
        self.conversation.update_chat_history(prompt, content)
        return {"text": content}
    except exceptions.FailedToGenerateResponseError:
        # Re-raise our own error untouched — previously the broad handler
        # below caught and re-wrapped it as a generic "Request failed".
        raise
    except CurlError as e:
        raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
    except Exception as e:
        # Chain the cause (was previously dropped).
        raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}") from e
|
|
270
|
+
|
|
271
|
+
return for_stream() if stream else for_non_stream()
|
|
272
|
+
|
|
273
|
+
def chat(
    self,
    prompt: str,
    stream: bool = False,
    optimizer: str = None,
    conversationally: bool = False,
) -> Union[str, Generator[str, None, None]]:
    """Send *prompt* and return the reply text.

    With ``stream=True`` a generator of text chunks is returned; otherwise
    the full reply is returned as a single string.
    """
    if stream:
        # Lazily unwrap each streamed dict into its text payload.
        return (
            self.get_message(chunk)
            for chunk in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally)
        )
    return self.get_message(
        self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
    )
|
|
288
|
+
|
|
289
|
+
def get_message(self, response: dict) -> str:
    """Return the generated text carried in a response dict.

    Raises AssertionError if *response* is not a dict.
    """
    assert isinstance(response, dict), "Response should be of dict data-type only"
    message: str = response["text"]
    return message
|
|
292
|
+
|
|
293
|
+
if __name__ == "__main__":
    # Smoke-test every advertised model and print a one-line report for each.
    divider = "-" * 80
    print(divider)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print(divider)

    for model in Aitopia.AVAILABLE_MODELS:
        try:
            test_ai = Aitopia(model=model, timeout=60)
            chunks = test_ai.chat("Say 'Hello' in one word", stream=True)
            response_text = "".join(chunks)

            if response_text and len(response_text.strip()) > 0:
                status = "✓"
                # Clean and truncate the reply for single-line display.
                clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
            else:
                status = "✗"
                display_text = "Empty or invalid response"
            print(f"\r{model:<50} {status:<10} {display_text}")
        except Exception as e:
            print(f"\r{model:<50} {'✗':<10} {str(e)}")
|