webscout 8.2.6__py3-none-any.whl → 8.2.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic. Click here for more details.
- webscout/AIauto.py +1 -1
- webscout/AIutel.py +298 -239
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +92 -19
- webscout/Extra/autocoder/autocoder.py +309 -114
- webscout/Extra/autocoder/autocoder_utiles.py +15 -15
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/weather.md +281 -0
- webscout/Litlogger/Readme.md +175 -0
- webscout/Provider/AISEARCH/DeepFind.py +41 -37
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +0 -1
- webscout/Provider/AISEARCH/genspark_search.py +228 -86
- webscout/Provider/AISEARCH/hika_search.py +11 -11
- webscout/Provider/AISEARCH/scira_search.py +324 -322
- webscout/Provider/AllenAI.py +7 -14
- webscout/Provider/Blackboxai.py +518 -74
- webscout/Provider/Cloudflare.py +0 -1
- webscout/Provider/Deepinfra.py +23 -21
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/GizAI.py +15 -5
- webscout/Provider/Glider.py +11 -8
- webscout/Provider/HeckAI.py +80 -52
- webscout/Provider/Koboldai.py +7 -4
- webscout/Provider/LambdaChat.py +2 -2
- webscout/Provider/Marcus.py +10 -18
- webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +282 -0
- webscout/Provider/OPENAI/NEMOTRON.py +244 -0
- webscout/Provider/OPENAI/README.md +1253 -0
- webscout/Provider/OPENAI/__init__.py +8 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -286
- webscout/Provider/OPENAI/api.py +810 -0
- webscout/Provider/OPENAI/base.py +217 -14
- webscout/Provider/OPENAI/c4ai.py +373 -367
- webscout/Provider/OPENAI/chatgpt.py +7 -0
- webscout/Provider/OPENAI/chatgptclone.py +7 -0
- webscout/Provider/OPENAI/chatsandbox.py +172 -0
- webscout/Provider/OPENAI/deepinfra.py +30 -20
- webscout/Provider/OPENAI/e2b.py +6 -0
- webscout/Provider/OPENAI/exaai.py +7 -0
- webscout/Provider/OPENAI/exachat.py +6 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -352
- webscout/Provider/OPENAI/glider.py +323 -316
- webscout/Provider/OPENAI/groq.py +361 -354
- webscout/Provider/OPENAI/heckai.py +30 -64
- webscout/Provider/OPENAI/llmchatco.py +8 -0
- webscout/Provider/OPENAI/mcpcore.py +7 -0
- webscout/Provider/OPENAI/multichat.py +8 -0
- webscout/Provider/OPENAI/netwrck.py +356 -350
- webscout/Provider/OPENAI/opkfc.py +8 -0
- webscout/Provider/OPENAI/scirachat.py +471 -462
- webscout/Provider/OPENAI/sonus.py +9 -0
- webscout/Provider/OPENAI/standardinput.py +9 -1
- webscout/Provider/OPENAI/textpollinations.py +339 -329
- webscout/Provider/OPENAI/toolbaz.py +7 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +358 -346
- webscout/Provider/OPENAI/uncovrAI.py +7 -0
- webscout/Provider/OPENAI/utils.py +103 -7
- webscout/Provider/OPENAI/venice.py +12 -0
- webscout/Provider/OPENAI/wisecat.py +19 -19
- webscout/Provider/OPENAI/writecream.py +7 -0
- webscout/Provider/OPENAI/x0gpt.py +7 -0
- webscout/Provider/OPENAI/yep.py +50 -21
- webscout/Provider/OpenGPT.py +1 -1
- webscout/Provider/TTI/AiForce/README.md +159 -0
- webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
- webscout/Provider/TTI/ImgSys/README.md +174 -0
- webscout/Provider/TTI/MagicStudio/README.md +101 -0
- webscout/Provider/TTI/Nexra/README.md +155 -0
- webscout/Provider/TTI/PollinationsAI/README.md +146 -0
- webscout/Provider/TTI/README.md +128 -0
- webscout/Provider/TTI/aiarta/README.md +134 -0
- webscout/Provider/TTI/artbit/README.md +100 -0
- webscout/Provider/TTI/fastflux/README.md +129 -0
- webscout/Provider/TTI/huggingface/README.md +114 -0
- webscout/Provider/TTI/piclumen/README.md +161 -0
- webscout/Provider/TTI/pixelmuse/README.md +79 -0
- webscout/Provider/TTI/talkai/README.md +139 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/speechma.py +500 -100
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TeachAnything.py +3 -7
- webscout/Provider/TextPollinationsAI.py +4 -2
- webscout/Provider/{aimathgpt.py → UNFINISHED/ChatHub.py} +88 -68
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/oivscode.py +351 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Writecream.py +11 -2
- webscout/Provider/__init__.py +8 -14
- webscout/Provider/ai4chat.py +4 -58
- webscout/Provider/asksteve.py +17 -9
- webscout/Provider/cerebras.py +3 -1
- webscout/Provider/koala.py +170 -268
- webscout/Provider/llmchat.py +3 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +7 -4
- webscout/Provider/samurai.py +223 -0
- webscout/Provider/scira_chat.py +4 -2
- webscout/Provider/typefully.py +23 -151
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/scout/README.md +402 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +134 -54
- webscout/zeroart/base.py +19 -13
- webscout/zeroart/effects.py +101 -99
- webscout/zeroart/fonts.py +1239 -816
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/METADATA +116 -74
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/RECORD +130 -103
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
- webscout-8.2.8.dist-info/entry_points.txt +3 -0
- webscout-8.2.8.dist-info/top_level.txt +1 -0
- webscout/Provider/AISEARCH/ISou.py +0 -256
- webscout/Provider/ElectronHub.py +0 -773
- webscout/Provider/Free2GPT.py +0 -241
- webscout/Provider/GPTWeb.py +0 -249
- webscout/Provider/bagoodex.py +0 -145
- webscout/Provider/geminiprorealtime.py +0 -160
- webscout/scout/core.py +0 -881
- webscout-8.2.6.dist-info/entry_points.txt +0 -3
- webscout-8.2.6.dist-info/top_level.txt +0 -2
- webstoken/__init__.py +0 -30
- webstoken/classifier.py +0 -189
- webstoken/keywords.py +0 -216
- webstoken/language.py +0 -128
- webstoken/ner.py +0 -164
- webstoken/normalizer.py +0 -35
- webstoken/processor.py +0 -77
- webstoken/sentiment.py +0 -206
- webstoken/stemmer.py +0 -73
- webstoken/tagger.py +0 -60
- webstoken/tokenizer.py +0 -158
- /webscout/Provider/{Youchat.py → UNFINISHED/Youchat.py} +0 -0
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
|
@@ -1,256 +0,0 @@
|
|
|
1
|
-
import requests
|
|
2
|
-
import json
|
|
3
|
-
import re
|
|
4
|
-
from typing import Dict, Optional, Generator, Any, Union
|
|
5
|
-
from webscout.litagent import LitAgent
|
|
6
|
-
from webscout import exceptions
|
|
7
|
-
from webscout.AIbase import AISearch
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
class Response:
    """Lightweight wrapper around Felo API response text.

    Instances render as their underlying text when printed or converted
    to a string, so they can be used interchangeably with plain strings
    in display contexts.

    Attributes:
        text (str): The text content of the response.

    Example:
        >>> resp = Response("Hello, world!")
        >>> print(resp)
        Hello, world!
        >>> str(resp)
        'Hello, world!'
    """

    def __init__(self, text: str):
        self.text = text

    def __str__(self) -> str:
        return self.text

    def __repr__(self) -> str:
        return self.text
|
|
34
|
-
class Isou(AISearch):
    """A class to interact with the Isou AI search API.

    Isou provides a search interface that returns AI-generated responses
    based on web content. It supports both streaming and non-streaming
    responses.

    Basic Usage:
        >>> from webscout import Isou
        >>> ai = Isou()
        >>> # Non-streaming example
        >>> response = ai.search("What is Python?")
        >>> print(response)
        Python is a high-level programming language...

        >>> # Streaming example
        >>> for chunk in ai.search("Tell me about AI", stream=True):
        ...     print(chunk, end="", flush=True)
        Artificial Intelligence is...

        >>> # Raw response format
        >>> for chunk in ai.search("Hello", stream=True, raw=True):
        ...     print(chunk)
        {'text': 'Hello', 'links': ['http://...']}

    Args:
        timeout (int, optional): Request timeout in seconds. Defaults to 120.
        proxies (dict, optional): Proxy configuration for requests. Defaults to None.
        model (str, optional): Model to use for search. Defaults to DeepSeek-R1.
    """

    def __init__(
        self,
        timeout: int = 120,
        proxies: Optional[dict] = None,
        model: str = "siliconflow:deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
    ):
        """Initialize the Isou API client.

        Args:
            timeout (int, optional): Request timeout in seconds. Defaults to 120.
            proxies (dict, optional): Proxy configuration for requests. Defaults to None.
            model (str, optional): Model to use for search. Defaults to DeepSeek-R1.

        Raises:
            ValueError: If ``model`` is not one of ``available_models``.
        """
        self.available_models = [
            "siliconflow:deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
            "siliconflow:Qwen/Qwen2.5-72B-Instruct-128K",
            "deepseek-reasoner",
        ]

        if model not in self.available_models:
            raise ValueError(
                f"Invalid model: {model}. Choose from: {self.available_models}"
            )

        self.session = requests.Session()
        self.api_endpoint = "https://isou.chat/api/search"
        self.stream_chunk_size = 64
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        # First two models are hosted by siliconflow; "deepseek-reasoner" by deepseek.
        self.provider = "siliconflow" if model in self.available_models[:2] else "deepseek"
        self.mode = "simple"  # or "deep"
        self.categories = "general"  # or "science"
        self.reload = False

        self.headers = {
            "accept": "text/event-stream",
            "pragma": "no-cache",
            "referer": "https://isou.chat/search?q=hi",
            "accept-language": "en-US,en;q=0.9",
            "content-type": "application/json",
            "sec-ch-ua": '"Not(A:Brand";v="99", "Brave";v="133", "Chromium";v="133"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "sec-gpc": "1",
            "user-agent": LitAgent().random(),
        }
        self.session.headers.update(self.headers)
        self.proxies = proxies

    def search(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
    ) -> Union[Response, Generator[Union[Dict[str, str], Response], None, None]]:
        """Search using the Isou API and get AI-generated responses.

        Args:
            prompt (str): The search query or prompt to send to the API.
            stream (bool, optional): If True, yields response chunks as they arrive.
                If False, returns the complete response. Defaults to False.
            raw (bool, optional): If True, produces raw response dictionaries.
                If False, produces Response objects. Defaults to False.

        Returns:
            Union[Response, Generator]:
                - If stream=True: a generator yielding chunks as they arrive.
                - If stream=False and raw=False: the complete Response.
                - If stream=False and raw=True: an iterable of raw chunk dicts.

        Raises:
            APIConnectionError: If the API request fails.
        """
        # Recompute the provider in case self.model was changed after __init__.
        self.provider = "siliconflow" if self.model in self.available_models[:2] else "deepseek"

        payload = {
            "categories": [self.categories],
            "engine": "SEARXNG",
            "language": "all",
            "mode": self.mode,
            "model": self.model,
            "provider": self.provider,
            "reload": self.reload,
            "stream": stream,
        }
        params = {"q": prompt}

        def for_stream() -> Generator[Union[Dict[str, Any], Response], None, None]:
            # Parse the server-sent event stream, yielding only the delta
            # (new text) of each cumulative "content" update.
            full_text = ""
            links = []
            try:
                with self.session.post(
                    self.api_endpoint,
                    params=params,
                    json=payload,
                    stream=True,
                    timeout=self.timeout,
                ) as response:
                    response.raise_for_status()
                    for line in response.iter_lines(chunk_size=self.stream_chunk_size, decode_unicode=True):
                        if not line or not line.startswith('data:'):
                            continue
                        try:
                            data = json.loads(line[5:].strip())

                            # Some events wrap the payload in a JSON-encoded "data" string.
                            if 'data' in data and isinstance(data['data'], str):
                                try:
                                    data.update(json.loads(data['data']))
                                except json.JSONDecodeError:
                                    pass

                            if 'content' in data:
                                content = data['content']
                                if isinstance(content, str):
                                    # "content" is cumulative; emit only the new suffix.
                                    delta = content[len(full_text):]
                                    full_text = content
                                    if raw:
                                        yield {"text": delta, "links": links}
                                    else:
                                        yield Response(delta)

                            if 'links' in data and isinstance(data['links'], list):
                                links.extend(data['links'])

                        except json.JSONDecodeError:
                            # Skip malformed event lines rather than aborting the stream.
                            continue

            except requests.exceptions.RequestException as e:
                raise exceptions.APIConnectionError(f"Request failed: {e}") from e

        def for_non_stream():
            # BUGFIX: the original version used `yield` in the raw branch,
            # which turned this function into a generator and silently
            # discarded the `return self.last_response` below. Collect the
            # chunks instead so both branches return a concrete value.
            if raw:
                return list(for_stream())
            full_response = "".join(str(chunk) for chunk in for_stream())
            self.last_response = Response(full_response)
            return self.last_response

        return for_stream() if stream else for_non_stream()

    @staticmethod
    def format_response(text: str, links: list) -> str:
        """Format the response text with numbered citations and a link list.

        Args:
            text (str): The response text containing citation markers.
            links (list): List of reference links, cited 1-based in order.

        Returns:
            str: Text with ``citation:N]]`` markers replaced by ``[N]``,
                followed by a numbered list of the links.
        """
        # Collapse runs of whitespace left over from streaming concatenation.
        text = re.sub(r'\s+', ' ', text).strip()

        # Replace "citation:N]]" markers with plain "[N]" references.
        link_map = {f"citation:{i}]]": f"[{i}]" for i, _ in enumerate(links, start=1)}
        for key, value in link_map.items():
            text = text.replace(key, value)
        text = text.replace("[[[", "[")

        link_list = "\n".join([f"{i}. {link}" for i, link in enumerate(links, start=1)])

        return f"{text}\n\nLinks:\n{link_list}"
|
|
244
|
-
|
|
245
|
-
if __name__ == "__main__":
    from rich import print

    # Demo: stream an answer for a user-supplied query using the default
    # DeepSeek-R1 distill model.
    ai = Isou(
        model="siliconflow:deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
    )

    for chunk in ai.search(input(">>> "), stream=True, raw=False):
        print(chunk, end="", flush=True)