webscout-8.0-py3-none-any.whl → webscout-8.2-py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
Potentially problematic release: this version of webscout has been flagged as potentially problematic.
- inferno/__init__.py +6 -0
- inferno/__main__.py +9 -0
- inferno/cli.py +6 -0
- webscout/Local/__init__.py +6 -0
- webscout/Local/__main__.py +9 -0
- webscout/Local/api.py +576 -0
- webscout/Local/cli.py +338 -0
- webscout/Local/config.py +75 -0
- webscout/Local/llm.py +188 -0
- webscout/Local/model_manager.py +205 -0
- webscout/Local/server.py +187 -0
- webscout/Local/utils.py +93 -0
- webscout/Provider/AISEARCH/DeepFind.py +1 -1
- webscout/Provider/AISEARCH/ISou.py +1 -1
- webscout/Provider/AISEARCH/Perplexity.py +359 -0
- webscout/Provider/AISEARCH/__init__.py +3 -1
- webscout/Provider/AISEARCH/felo_search.py +1 -1
- webscout/Provider/AISEARCH/genspark_search.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +1 -1
- webscout/Provider/AISEARCH/iask_search.py +436 -0
- webscout/Provider/AISEARCH/scira_search.py +9 -5
- webscout/Provider/AISEARCH/webpilotai_search.py +1 -1
- webscout/Provider/ExaAI.py +1 -1
- webscout/Provider/ExaChat.py +18 -8
- webscout/Provider/GithubChat.py +5 -1
- webscout/Provider/Glider.py +4 -2
- webscout/Provider/Jadve.py +2 -2
- webscout/Provider/OPENAI/__init__.py +24 -0
- webscout/Provider/OPENAI/base.py +46 -0
- webscout/Provider/OPENAI/c4ai.py +347 -0
- webscout/Provider/OPENAI/chatgpt.py +549 -0
- webscout/Provider/OPENAI/chatgptclone.py +460 -0
- webscout/Provider/OPENAI/deepinfra.py +284 -0
- webscout/Provider/OPENAI/exaai.py +419 -0
- webscout/Provider/OPENAI/exachat.py +433 -0
- webscout/Provider/OPENAI/freeaichat.py +355 -0
- webscout/Provider/OPENAI/glider.py +316 -0
- webscout/Provider/OPENAI/heckai.py +337 -0
- webscout/Provider/OPENAI/llmchatco.py +327 -0
- webscout/Provider/OPENAI/netwrck.py +348 -0
- webscout/Provider/OPENAI/opkfc.py +488 -0
- webscout/Provider/OPENAI/scirachat.py +463 -0
- webscout/Provider/OPENAI/sonus.py +294 -0
- webscout/Provider/OPENAI/standardinput.py +425 -0
- webscout/Provider/OPENAI/textpollinations.py +285 -0
- webscout/Provider/OPENAI/toolbaz.py +405 -0
- webscout/Provider/OPENAI/typegpt.py +361 -0
- webscout/Provider/OPENAI/uncovrAI.py +455 -0
- webscout/Provider/OPENAI/utils.py +211 -0
- webscout/Provider/OPENAI/venice.py +428 -0
- webscout/Provider/OPENAI/wisecat.py +381 -0
- webscout/Provider/OPENAI/writecream.py +158 -0
- webscout/Provider/OPENAI/x0gpt.py +389 -0
- webscout/Provider/OPENAI/yep.py +329 -0
- webscout/Provider/StandardInput.py +278 -0
- webscout/Provider/TextPollinationsAI.py +27 -28
- webscout/Provider/Venice.py +1 -1
- webscout/Provider/Writecream.py +211 -0
- webscout/Provider/WritingMate.py +197 -0
- webscout/Provider/Youchat.py +30 -26
- webscout/Provider/__init__.py +14 -6
- webscout/Provider/koala.py +2 -2
- webscout/Provider/llmchatco.py +5 -0
- webscout/Provider/scira_chat.py +18 -12
- webscout/Provider/scnet.py +187 -0
- webscout/Provider/toolbaz.py +320 -0
- webscout/Provider/typegpt.py +3 -184
- webscout/Provider/uncovr.py +3 -3
- webscout/conversation.py +32 -32
- webscout/prompt_manager.py +2 -1
- webscout/version.py +1 -1
- webscout-8.2.dist-info/METADATA +734 -0
- {webscout-8.0.dist-info → webscout-8.2.dist-info}/RECORD +77 -32
- webscout-8.2.dist-info/entry_points.txt +5 -0
- {webscout-8.0.dist-info → webscout-8.2.dist-info}/top_level.txt +1 -0
- webscout/Provider/flowith.py +0 -207
- webscout-8.0.dist-info/METADATA +0 -995
- webscout-8.0.dist-info/entry_points.txt +0 -3
- {webscout-8.0.dist-info → webscout-8.2.dist-info}/LICENSE.md +0 -0
- {webscout-8.0.dist-info → webscout-8.2.dist-info}/WHEEL +0 -0
webscout/Provider/AISEARCH/iask_search.py
ADDED
@@ -0,0 +1,436 @@
+import aiohttp
+import asyncio
+import lxml.html
+import re
+import urllib.parse
+from markdownify import markdownify as md
+from typing import Dict, Optional, Generator, Union, AsyncIterator, Literal
+
+from webscout.AIbase import AISearch
+from webscout import exceptions
+from webscout.scout import Scout
+
+
+class Response:
+    """A wrapper class for IAsk API responses.
+
+    This class automatically converts response objects to their text representation
+    when printed or converted to string.
+
+    Attributes:
+        text (str): The text content of the response
+
+    Example:
+        >>> response = Response("Hello, world!")
+        >>> print(response)
+        Hello, world!
+        >>> str(response)
+        'Hello, world!'
+    """
+    def __init__(self, text: str):
+        self.text = text
+
+    def __str__(self):
+        return self.text
+
+    def __repr__(self):
+        return self.text
+
+
+def cache_find(diff: Union[dict, list]) -> Optional[str]:
+    """Find HTML content in a nested dictionary or list structure.
+
+    Args:
+        diff (Union[dict, list]): The nested structure to search
+
+    Returns:
+        Optional[str]: The found HTML content, or None if not found
+    """
+    values = diff if isinstance(diff, list) else diff.values()
+    for value in values:
+        if isinstance(value, (list, dict)):
+            cache = cache_find(value)
+            if cache:
+                return cache
+        if isinstance(value, str) and re.search(r"<p>.+?</p>", value):
+            return md(value).strip()
+
+    return None
+
+
+ModeType = Literal["question", "academic", "fast", "forums", "wiki", "advanced"]
+DetailLevelType = Literal["concise", "detailed", "comprehensive"]
+
+
+class IAsk(AISearch):
+    """A class to interact with the IAsk AI search API.
+
+    IAsk provides a powerful search interface that returns AI-generated responses
+    based on web content. It supports both streaming and non-streaming responses,
+    as well as different search modes and detail levels.
+
+    Basic Usage:
+        >>> from webscout import IAsk
+        >>> ai = IAsk()
+        >>> # Non-streaming example
+        >>> response = ai.search("What is Python?")
+        >>> print(response)
+        Python is a high-level programming language...
+
+        >>> # Streaming example
+        >>> for chunk in ai.search("Tell me about AI", stream=True):
+        ...     print(chunk, end="", flush=True)
+        Artificial Intelligence is...
+
+        >>> # With specific mode and detail level
+        >>> response = ai.search("Climate change", mode="academic", detail_level="detailed")
+        >>> print(response)
+        Climate change refers to...
+
+    Args:
+        timeout (int, optional): Request timeout in seconds. Defaults to 30.
+        proxies (dict, optional): Proxy configuration for requests. Defaults to None.
+        mode (ModeType, optional): Default search mode. Defaults to "question".
+        detail_level (DetailLevelType, optional): Default detail level. Defaults to None.
+    """
+
+    def __init__(
+        self,
+        timeout: int = 30,
+        proxies: Optional[dict] = None,
+        mode: ModeType = "question",
+        detail_level: Optional[DetailLevelType] = None,
+    ):
+        """Initialize the IAsk API client.
+
+        Args:
+            timeout (int, optional): Request timeout in seconds. Defaults to 30.
+            proxies (dict, optional): Proxy configuration for requests. Defaults to None.
+            mode (ModeType, optional): Default search mode. Defaults to "question".
+            detail_level (DetailLevelType, optional): Default detail level. Defaults to None.
+        """
+        self.timeout = timeout
+        self.proxies = proxies or {}
+        self.default_mode = mode
+        self.default_detail_level = detail_level
+        self.api_endpoint = "https://iask.ai/"
+        self.last_response = {}
+
+    def create_url(self, query: str, mode: ModeType = "question", detail_level: Optional[DetailLevelType] = None) -> str:
+        """Create a properly formatted URL with mode and detail level parameters.
+
+        Args:
+            query (str): The search query.
+            mode (ModeType, optional): Search mode. Defaults to "question".
+            detail_level (DetailLevelType, optional): Detail level. Defaults to None.
+
+        Returns:
+            str: Formatted URL with query parameters.
+
+        Example:
+            >>> ai = IAsk()
+            >>> url = ai.create_url("Climate change", mode="academic", detail_level="detailed")
+            >>> print(url)
+            https://iask.ai/?mode=academic&q=Climate+change&options%5Bdetail_level%5D=detailed
+        """
+        # Create a dictionary of parameters with flattened structure
+        params = {
+            "mode": mode,
+            "q": query
+        }
+
+        # Add detail_level if provided using the flattened format
+        if detail_level:
+            params["options[detail_level]"] = detail_level
+
+        # Encode the parameters and build the URL
+        query_string = urllib.parse.urlencode(params)
+        url = f"{self.api_endpoint}?{query_string}"
+
+        return url
+
+    def format_html(self, html_content: str) -> str:
+        """Format HTML content into a more readable text format.
+
+        Args:
+            html_content (str): The HTML content to format.
+
+        Returns:
+            str: Formatted text.
+        """
+        scout = Scout(html_content, features='html.parser')
+        output_lines = []
+
+        for child in scout.find_all(['h1', 'h2', 'h3', 'p', 'ol', 'ul', 'div']):
+            if child.name in ["h1", "h2", "h3"]:
+                output_lines.append(f"\n**{child.get_text().strip()}**\n")
+            elif child.name == "p":
+                text = child.get_text().strip()
+                text = re.sub(r"^According to Ask AI & Question AI www\.iAsk\.ai:\s*", "", text).strip()
+                # Remove footnote markers
+                text = re.sub(r'\[\d+\]\(#fn:\d+ \'see footnote\'\)', '', text)
+                output_lines.append(text + "\n")
+            elif child.name in ["ol", "ul"]:
+                for li in child.find_all("li"):
+                    output_lines.append("- " + li.get_text().strip() + "\n")
+            elif child.name == "div" and "footnotes" in child.get("class", []):
+                output_lines.append("\n**Authoritative Sources**\n")
+                for li in child.find_all("li"):
+                    link = li.find("a")
+                    if link:
+                        output_lines.append(f"- {link.get_text().strip()} ({link.get('href')})\n")
+
+        return "".join(output_lines)
+
+    def search(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        mode: Optional[ModeType] = None,
+        detail_level: Optional[DetailLevelType] = None,
+    ) -> Union[Response, Generator[Union[Dict[str, str], Response], None, None]]:
+        """Search using the IAsk API and get AI-generated responses.
+
+        This method sends a search query to IAsk and returns the AI-generated response.
+        It supports both streaming and non-streaming modes, as well as raw response format.
+
+        Args:
+            prompt (str): The search query or prompt to send to the API.
+            stream (bool, optional): If True, yields response chunks as they arrive.
+                If False, returns complete response. Defaults to False.
+            raw (bool, optional): If True, returns raw response dictionaries with 'text' key.
+                If False, returns Response objects that convert to text automatically.
+                Defaults to False.
+            mode (ModeType, optional): Search mode to use. Defaults to None (uses instance default).
+            detail_level (DetailLevelType, optional): Detail level to use. Defaults to None (uses instance default).
+
+        Returns:
+            Union[Response, Generator[Union[Dict[str, str], Response], None, None]]:
+                - If stream=False: Returns complete response as Response object
+                - If stream=True: Yields response chunks as either Dict or Response objects
+
+        Raises:
+            APIConnectionError: If the API request fails
+
+        Examples:
+            Basic search:
+            >>> ai = IAsk()
+            >>> response = ai.search("What is Python?")
+            >>> print(response)
+            Python is a programming language...
+
+            Streaming response:
+            >>> for chunk in ai.search("Tell me about AI", stream=True):
+            ...     print(chunk, end="")
+            Artificial Intelligence...
+
+            Raw response format:
+            >>> for chunk in ai.search("Hello", stream=True, raw=True):
+            ...     print(chunk)
+            {'text': 'Hello'}
+            {'text': ' there!'}
+
+            With specific mode and detail level:
+            >>> response = ai.search("Climate change", mode="academic", detail_level="detailed")
+            >>> print(response)
+            Climate change refers to...
+        """
+        # Use provided parameters or fall back to instance defaults
+        search_mode = mode or self.default_mode
+        search_detail_level = detail_level or self.default_detail_level
+
+        # For non-streaming, run the async search and return the complete response
+        if not stream:
+            # Create a new event loop for this request
+            loop = asyncio.new_event_loop()
+            asyncio.set_event_loop(loop)
+            try:
+                result = loop.run_until_complete(
+                    self._async_search(prompt, False, raw, search_mode, search_detail_level)
+                )
+                return result
+            finally:
+                loop.close()
+
+        # For streaming, use a simpler approach with a single event loop
+        # that stays open until the generator is exhausted
+        buffer = ""
+
+        def sync_generator():
+            nonlocal buffer
+            # Create a new event loop for this generator
+            loop = asyncio.new_event_loop()
+            asyncio.set_event_loop(loop)
+
+            try:
+                # Get the async generator
+                async_gen_coro = self._async_search(prompt, True, raw, search_mode, search_detail_level)
+                async_gen = loop.run_until_complete(async_gen_coro)
+
+                # Process chunks one by one
+                while True:
+                    try:
+                        # Get the next chunk
+                        chunk_coro = async_gen.__anext__()
+                        chunk = loop.run_until_complete(chunk_coro)
+
+                        # Update buffer and yield the chunk
+                        if isinstance(chunk, dict) and 'text' in chunk:
+                            buffer += chunk['text']
+                        elif isinstance(chunk, Response):
+                            buffer += chunk.text
+                        else:
+                            buffer += str(chunk)
+
+                        yield chunk
+                    except StopAsyncIteration:
+                        break
+                    except Exception as e:
+                        print(f"Error in generator: {e}")
+                        break
+            finally:
+                # Store the final response and close the loop
+                self.last_response = {"text": buffer}
+                loop.close()
+
+        return sync_generator()
+
+    async def _async_search(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        mode: ModeType = "question",
+        detail_level: Optional[DetailLevelType] = None,
+    ) -> Union[Response, AsyncIterator[Union[Dict[str, str], Response]]]:
+        """Internal async implementation of the search method."""
+
+        async def stream_generator() -> AsyncIterator[str]:
+            async with aiohttp.ClientSession() as session:
+                # Prepare parameters
+                params = {"mode": mode, "q": prompt}
+                if detail_level:
+                    params["options[detail_level]"] = detail_level
+
+                try:
+                    async with session.get(
+                        self.api_endpoint,
+                        params=params,
+                        proxy=self.proxies.get('http') if self.proxies else None,
+                        timeout=self.timeout
+                    ) as response:
+                        if not response.ok:
+                            raise exceptions.APIConnectionError(
+                                f"Failed to generate response - ({response.status_code}, {response.reason}) - {await response.text()}"
+                            )
+
+                        etree = lxml.html.fromstring(await response.text())
+                        phx_node = etree.xpath('//*[starts-with(@id, "phx-")]').pop()
+                        csrf_token = (
+                            etree.xpath('//*[@name="csrf-token"]').pop().get("content")
+                        )
+
+                        async with session.ws_connect(
+                            f"{self.api_endpoint}live/websocket",
+                            params={
+                                "_csrf_token": csrf_token,
+                                "vsn": "2.0.0",
+                            },
+                            proxy=self.proxies.get('http') if self.proxies else None,
+                            timeout=self.timeout
+                        ) as wsResponse:
+                            await wsResponse.send_json(
+                                [
+                                    None,
+                                    None,
+                                    f"lv:{phx_node.get('id')}",
+                                    "phx_join",
+                                    {
+                                        "params": {"_csrf_token": csrf_token},
+                                        "url": str(response.url),
+                                        "session": phx_node.get("data-phx-session"),
+                                    },
+                                ]
+                            )
+                            while True:
+                                json_data = await wsResponse.receive_json()
+                                if not json_data:
+                                    break
+                                diff: dict = json_data[4]
+                                try:
+                                    chunk: str = diff["e"][0][1]["data"]
+                                    # Check if the chunk contains HTML content
+                                    if re.search(r"<[^>]+>", chunk):
+                                        formatted_chunk = self.format_html(chunk)
+                                        yield formatted_chunk
+                                    else:
+                                        yield chunk.replace("<br/>", "\n")
+                                except:
+                                    cache = cache_find(diff)
+                                    if cache:
+                                        if diff.get("response", None):
+                                            # Format the cache content if it contains HTML
+                                            if re.search(r"<[^>]+>", cache):
+                                                formatted_cache = self.format_html(cache)
+                                                yield formatted_cache
+                                            else:
+                                                yield cache
+                                        break
+                except Exception as e:
+                    raise exceptions.APIConnectionError(f"Error connecting to IAsk API: {str(e)}")
+
+        # For non-streaming, collect all chunks and return a single response
+        if not stream:
+            buffer = ""
+            async for chunk in stream_generator():
+                buffer += chunk
+            self.last_response = {"text": buffer}
+            return Response(buffer) if not raw else {"text": buffer}
+
+        # For streaming, create an async generator that yields chunks
+        async def process_stream():
+            buffer = ""
+            async for chunk in stream_generator():
+                buffer += chunk
+                if raw:
+                    yield {"text": chunk}
+                else:
+                    yield Response(chunk)
+            self.last_response = {"text": buffer}
+
+        # Return the async generator
+        return process_stream()
+
+
+if __name__ == "__main__":
+    from rich import print
+
+    ai = IAsk()
+
+    # Example 1: Simple search with default mode
+    print("\n[bold cyan]Example 1: Simple search with default mode[/bold cyan]")
+    response = ai.search("What is Python?", stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
+    print("\n\n[bold green]Response complete.[/bold green]\n")
+
+    # Example 2: Search with academic mode
+    print("\n[bold cyan]Example 2: Search with academic mode[/bold cyan]")
+    response = ai.search("Quantum computing applications", mode="academic", stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
+    print("\n\n[bold green]Response complete.[/bold green]\n")
+
+    # Example 3: Search with advanced mode and detailed level
+    print("\n[bold cyan]Example 3: Search with advanced mode and detailed level[/bold cyan]")
+    response = ai.search("Climate change solutions", mode="advanced", detail_level="detailed", stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
+    print("\n\n[bold green]Response complete.[/bold green]\n")
+
+    # Example 4: Demonstrating the create_url method
+    print("\n[bold cyan]Example 4: Generated URL for browser access[/bold cyan]")
+    url = ai.create_url("Helpingai details", mode="question", detail_level="detailed")
+    print(f"URL: {url}")
+    print("This URL can be used directly in a browser or with other HTTP clients.")
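The new IAsk provider follows the same AISearch interface as the other AISEARCH modules: search() drives a Phoenix LiveView websocket and returns either a complete Response or a chunk generator. Below is a minimal usage sketch, importing the class from its module path; the top-level `from webscout import IAsk` shown in its docstring may also work, depending on the package's re-exports.

from webscout.Provider.AISEARCH.iask_search import IAsk

ai = IAsk(timeout=30, mode="question")

# Non-streaming: returns a Response whose str() is the answer text
print(ai.search("What is Python?"))

# Streaming: yields chunks as they arrive over the websocket
for chunk in ai.search("Tell me about AI", stream=True):
    print(chunk, end="", flush=True)

# Build a shareable URL without calling the API
print(ai.create_url("Climate change", mode="academic", detail_level="detailed"))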
webscout/Provider/AISEARCH/scira_search.py
CHANGED
@@ -7,7 +7,7 @@ from typing import Dict, Optional, Generator, Union, Any
 
 from webscout.AIbase import AISearch
 from webscout import exceptions
-from webscout import LitAgent
+from webscout.litagent import LitAgent
 
 
 class Response:
@@ -70,10 +70,14 @@ class Scira(AISearch):
 
     AVAILABLE_MODELS = {
         "scira-default": "Grok3",
-        "scira-grok-3-mini": "Grok3-mini",
-        "scira-vision": "Grok2-Vision",
-        "scira-
-        "scira-
+        "scira-grok-3-mini": "Grok3-mini", # thinking model
+        "scira-vision" : "Grok2-Vision", # vision model
+        "scira-4.1-mini": "GPT4.1-mini",
+        "scira-qwq": "QWQ-32B",
+        "scira-o4-mini": "o4-mini",
+        "scira-google": "gemini 2.5 flash"
+
+
     }
 
     def __init__(
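The scira_search.py change fixes the LitAgent import path and extends AVAILABLE_MODELS with GPT4.1-mini, QWQ-32B, o4-mini, and Gemini 2.5 Flash aliases. A hedged sketch of selecting one of the new aliases follows; the model keyword and the search() call are assumed from the AISearch pattern used elsewhere in this release and are not shown in this hunk.

from webscout.Provider.AISEARCH.scira_search import Scira

ai = Scira(model="scira-o4-mini")  # assumed keyword; one of the newly listed aliases
for chunk in ai.search("Latest developments in quantum computing", stream=True):
    print(chunk, end="", flush=True)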
webscout/Provider/ExaAI.py
CHANGED
@@ -155,7 +155,7 @@ class ExaAI(Provider):
         payload = {
             "id": conversation_id,
             "messages": [
-                # {"role": "system", "content": self.system_prompt},
+                # {"role": "system", "content": self.system_prompt}, # system role not supported by this provider
                 {"role": "user", "content": conversation_prompt}
             ]
         }
webscout/Provider/ExaChat.py
CHANGED
@@ -11,11 +11,11 @@ from webscout.litagent import LitAgent
 # Model configurations
 MODEL_CONFIGS = {
     "exaanswer": {
-        "endpoint": "https://
+        "endpoint": "https://ayle.chat/api/exaanswer",
         "models": ["exaanswer"],
     },
     "gemini": {
-        "endpoint": "https://
+        "endpoint": "https://ayle.chat/api/gemini",
         "models": [
             "gemini-2.0-flash",
             "gemini-2.0-flash-exp-image-generation",
@@ -26,7 +26,7 @@ MODEL_CONFIGS = {
         ],
     },
     "openrouter": {
-        "endpoint": "https://
+        "endpoint": "https://ayle.chat/api/openrouter",
         "models": [
             "mistralai/mistral-small-3.1-24b-instruct:free",
             "deepseek/deepseek-r1:free",
@@ -36,7 +36,7 @@ MODEL_CONFIGS = {
         ],
     },
     "groq": {
-        "endpoint": "https://
+        "endpoint": "https://ayle.chat/api/groq",
         "models": [
             "deepseek-r1-distill-llama-70b",
             "deepseek-r1-distill-qwen-32b",
@@ -56,12 +56,18 @@ MODEL_CONFIGS = {
         ],
     },
     "cerebras": {
-        "endpoint": "https://
+        "endpoint": "https://ayle.chat/api/cerebras",
         "models": [
             "llama3.1-8b",
             "llama-3.3-70b"
         ],
     },
+    "xai": {
+        "endpoint": "https://ayle.chat/api/xai",
+        "models": [
+            "grok-3-mini-beta"
+        ],
+    },
 }
 
 class ExaChat(Provider):
@@ -71,6 +77,9 @@ class ExaChat(Provider):
     AVAILABLE_MODELS = [
         # ExaAnswer Models
         "exaanswer",
+
+        # XAI Models
+        "grok-3-mini-beta",
 
         # Gemini Models
         "gemini-2.0-flash",
@@ -106,7 +115,8 @@ class ExaChat(Provider):
 
         # Cerebras Models
         "llama3.1-8b",
-        "llama-3.3-70b"
+        "llama-3.3-70b",
+
     ]
 
     def __init__(
@@ -150,8 +160,8 @@ class ExaChat(Provider):
             "accept": "*/*",
             "accept-language": "en-US,en;q=0.9",
             "content-type": "application/json",
-            "origin": "https://
-            "referer": "https://
+            "origin": "https://ayle.chat/",
+            "referer": "https://ayle.chat/",
             "user-agent": self.agent.random(),
         }
 
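ExaChat's endpoints now point at ayle.chat, and a new xai backend exposes grok-3-mini-beta. A hedged sketch follows, assuming ExaChat keeps the usual webscout Provider interface (a model keyword plus a chat() method, neither of which is shown in this diff).

from webscout.Provider.ExaChat import ExaChat

bot = ExaChat(model="grok-3-mini-beta")  # assumed keyword; routed via https://ayle.chat/api/xai
print(bot.chat("Summarize this release in one sentence."))  # chat() assumed from the Provider base class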
webscout/Provider/GithubChat.py
CHANGED
webscout/Provider/Glider.py
CHANGED
@@ -13,9 +13,11 @@ class GliderAI(Provider):
     """
 
     AVAILABLE_MODELS = [
-        "chat-llama-3-1-70b",
         "chat-llama-3-1-8b",
         "chat-llama-3-2-3b",
+        "chat-deepseek-r1-qwen-32b",
+        "chat-qwen-2-5-7b",
+        "chat-qwen-qwq-32b",
         "deepseek-ai/DeepSeek-R1",
     ]
 
@@ -30,7 +32,7 @@ class GliderAI(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: Optional[str] = None,
-        model: str = "chat-llama-3-1-
+        model: str = "chat-llama-3-1-8b",
         system_prompt: str = "You are a helpful AI assistant."
     ):
         """Initializes the GliderAI API client."""
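GliderAI drops chat-llama-3-1-70b, adds DeepSeek-R1-Qwen and Qwen variants, and now defaults to chat-llama-3-1-8b. A hedged sketch using the constructor arguments visible in this hunk; the chat() call is assumed from the Provider base class.

from webscout.Provider.Glider import GliderAI

bot = GliderAI(model="chat-qwen-qwq-32b", system_prompt="You are a helpful AI assistant.")
print(bot.chat("Explain what QwQ-32B is in two sentences."))  # chat() assumed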
webscout/Provider/Jadve.py
CHANGED
@@ -13,7 +13,7 @@ class JadveOpenAI(Provider):
     A class to interact with the OpenAI API through jadve.com using the streaming endpoint.
     """
 
-    AVAILABLE_MODELS = ["gpt-4o
+    AVAILABLE_MODELS = ["gpt-4o-mini"]
 
     def __init__(
         self,
@@ -26,7 +26,7 @@ class JadveOpenAI(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "
+        model: str = "gpt-4o-mini",
         system_prompt: str = "You are a helpful AI assistant."
     ):
         """
webscout/Provider/OPENAI/__init__.py
ADDED
@@ -0,0 +1,24 @@
+# This file marks the directory as a Python package.
+from .deepinfra import *
+from .glider import *
+from .chatgptclone import *
+from .x0gpt import *
+from .wisecat import *
+from .venice import *
+from .exaai import *
+from .typegpt import *
+from .scirachat import *
+from .freeaichat import *
+from .llmchatco import *
+from .yep import * # Add YEPCHAT
+from .heckai import *
+from .sonus import *
+from .exachat import *
+from .netwrck import *
+from .standardinput import *
+from .writecream import *
+from .toolbaz import *
+from .uncovrAI import *
+from .opkfc import *
+from .chatgpt import *
+from .textpollinations import *