webscout 1.4.5__tar.gz → 1.4.6__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- {webscout-1.4.5 → webscout-1.4.6}/PKG-INFO +9 -9
- {webscout-1.4.5 → webscout-1.4.6}/setup.py +9 -9
- {webscout-1.4.5 → webscout-1.4.6}/webscout/AI.py +1 -0
- webscout-1.4.6/webscout/AIauto.py +452 -0
- {webscout-1.4.5 → webscout-1.4.6}/webscout/AIutel.py +3 -1
- {webscout-1.4.5 → webscout-1.4.6}/webscout/__init__.py +3 -2
- {webscout-1.4.5 → webscout-1.4.6}/webscout/async_providers.py +1 -11
- {webscout-1.4.5 → webscout-1.4.6}/webscout/exceptions.py +5 -0
- {webscout-1.4.5 → webscout-1.4.6}/webscout/webai.py +24 -4
- {webscout-1.4.5 → webscout-1.4.6}/webscout/webscout_search.py +2 -8
- {webscout-1.4.5 → webscout-1.4.6}/webscout.egg-info/PKG-INFO +9 -9
- {webscout-1.4.5 → webscout-1.4.6}/webscout.egg-info/SOURCES.txt +1 -0
- {webscout-1.4.5 → webscout-1.4.6}/webscout.egg-info/requires.txt +8 -8
- {webscout-1.4.5 → webscout-1.4.6}/DeepWEBS/__init__.py +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/DeepWEBS/documents/__init__.py +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/DeepWEBS/documents/query_results_extractor.py +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/DeepWEBS/documents/webpage_content_extractor.py +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/DeepWEBS/networks/__init__.py +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/DeepWEBS/networks/filepath_converter.py +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/DeepWEBS/networks/google_searcher.py +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/DeepWEBS/networks/network_configs.py +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/DeepWEBS/networks/webpage_fetcher.py +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/DeepWEBS/utilsdw/__init__.py +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/DeepWEBS/utilsdw/enver.py +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/DeepWEBS/utilsdw/logger.py +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/LICENSE.md +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/README.md +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/setup.cfg +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/webscout/AIbase.py +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/webscout/DWEBS.py +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/webscout/LLM.py +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/webscout/__main__.py +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/webscout/cli.py +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/webscout/g4f.py +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/webscout/models.py +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/webscout/tempid.py +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/webscout/transcriber.py +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/webscout/utils.py +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/webscout/version.py +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/webscout/voice.py +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/webscout/webscout_search_async.py +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/webscout.egg-info/dependency_links.txt +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/webscout.egg-info/entry_points.txt +0 -0
- {webscout-1.4.5 → webscout-1.4.6}/webscout.egg-info/top_level.txt +0 -0

{webscout-1.4.5 → webscout-1.4.6}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 1.4.5
+Version: 1.4.6
 Summary: Search for anything using the Google, DuckDuckGo, phind.com. Also containes AI models, can transcribe yt videos, temporary email and phone number generation, have TTS support and webai(terminal gpt and open interpeter)
 Author: OEvortex
 Author-email: helpingai5@gmail.com
@@ -24,14 +24,14 @@ Classifier: Topic :: Internet :: WWW/HTTP :: Indexing/Search
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Description-Content-Type: text/markdown
 License-File: LICENSE.md
-Requires-Dist: docstring_inheritance
-Requires-Dist: click
-Requires-Dist: curl_cffi
-Requires-Dist: lxml
-Requires-Dist: nest-asyncio
-Requires-Dist: selenium
-Requires-Dist: tqdm
-Requires-Dist: webdriver-manager
+Requires-Dist: docstring_inheritance
+Requires-Dist: click
+Requires-Dist: curl_cffi
+Requires-Dist: lxml
+Requires-Dist: nest-asyncio
+Requires-Dist: selenium
+Requires-Dist: tqdm
+Requires-Dist: webdriver-manager
 Requires-Dist: halo>=0.0.31
 Requires-Dist: g4f>=0.2.2.3
 Requires-Dist: rich

{webscout-1.4.5 → webscout-1.4.6}/setup.py

@@ -5,7 +5,7 @@ with open("README.md", encoding="utf-8") as f:
 
 setup(
     name="webscout",
-    version="1.4.5",
+    version="1.4.6",
     description="Search for anything using the Google, DuckDuckGo, phind.com. Also containes AI models, can transcribe yt videos, temporary email and phone number generation, have TTS support and webai(terminal gpt and open interpeter)",
     long_description=README,
     long_description_content_type="text/markdown",
@@ -28,14 +28,14 @@ setup(
         'Topic :: Software Development :: Libraries :: Python Modules',
     ],
     install_requires=[
-        "docstring_inheritance",
-        "click",
-        "curl_cffi",
-        "lxml",
-        "nest-asyncio",
-        "selenium",
-        "tqdm",
-        "webdriver-manager",
+        "docstring_inheritance",
+        "click",
+        "curl_cffi",
+        "lxml",
+        "nest-asyncio",
+        "selenium",
+        "tqdm",
+        "webdriver-manager",
         "halo>=0.0.31",
         "g4f>=0.2.2.3",
         "rich",

webscout-1.4.6/webscout/AIauto.py

@@ -0,0 +1,452 @@
+from webscout.AI import Provider, AsyncProvider
+from webscout.AI import OPENGPT, AsyncOPENGPT
+from webscout.AI import KOBOLDAI, AsyncKOBOLDAI
+from webscout.AI import PhindSearch, AsyncPhindSearch
+from webscout.AI import LLAMA2, AsyncLLAMA2
+from webscout.AI import BLACKBOXAI, AsyncBLACKBOXAI
+from webscout.AI import PERPLEXITY
+from webscout.AI import ThinkAnyAI
+from webscout.AI import YouChat
+from webscout.AI import YEPCHAT
+from webscout.g4f import GPT4FREE, AsyncGPT4FREE
+from webscout.g4f import TestProviders
+from webscout.exceptions import AllProvidersFailure
+from webscout.async_providers import mapper as async_provider_map
+from typing import AsyncGenerator
+
+from typing import Union
+from typing import Any
+import logging
+
+
+provider_map: dict[
+    str, Union[OPENGPT, KOBOLDAI, PhindSearch, LLAMA2, BLACKBOXAI, PERPLEXITY, GPT4FREE, ThinkAnyAI, YEPCHAT, YouChat]
+] = {
+    "PhindSearch": PhindSearch,
+    "perplexity": PERPLEXITY,
+    "opengpt": OPENGPT,
+    "koboldai": KOBOLDAI,
+    "llama2": LLAMA2,
+    "blackboxai": BLACKBOXAI,
+    "gpt4free": GPT4FREE,
+    "thinkany": ThinkAnyAI,
+    "yepchat": YEPCHAT,
+    "you": YouChat,
+}
+
+
+class AUTO(Provider):
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        exclude: list[str] = [],
+    ):
+        """Instantiates AUTO
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            exclude(list[str], optional): List of providers to be excluded. Defaults to [].
+        """
+        self.provider: Union[OPENGPT, KOBOLDAI, PhindSearch, LLAMA2, BLACKBOXAI, PERPLEXITY, GPT4FREE, ThinkAnyAI, YEPCHAT, YouChat] = None
+        self.provider_name: str = None
+        self.is_conversation = is_conversation
+        self.max_tokens = max_tokens
+        self.timeout = timeout
+        self.intro = intro
+        self.filepath = filepath
+        self.update_file = update_file
+        self.proxies = proxies
+        self.history_offset = history_offset
+        self.act = act
+        self.exclude = exclude
+
+    @property
+    def last_response(self) -> dict[str, Any]:
+        return self.provider.last_response
+
+    @property
+    def conversation(self) -> object:
+        return self.provider.conversation
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        run_new_test: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+            run_new_test (bool, optional): Perform new test on g4f-based providers. Defaults to False.
+        Returns:
+            dict : {}
+        """
+        ask_kwargs: dict[str, Union[str, bool]] = {
+            "prompt": prompt,
+            "stream": stream,
+            "raw": raw,
+            "optimizer": optimizer,
+            "conversationally": conversationally,
+        }
+
+        # tgpt-based providers
+        for provider_name, provider_obj in provider_map.items():
+            # continue
+            if provider_name in self.exclude:
+                continue
+            try:
+                self.provider_name = f"tgpt-{provider_name}"
+                self.provider = provider_obj(
+                    is_conversation=self.is_conversation,
+                    max_tokens=self.max_tokens,
+                    timeout=self.timeout,
+                    intro=self.intro,
+                    filepath=self.filepath,
+                    update_file=self.update_file,
+                    proxies=self.proxies,
+                    history_offset=self.history_offset,
+                    act=self.act,
+                )
+
+                def for_stream():
+                    for chunk in self.provider.ask(**ask_kwargs):
+                        yield chunk
+
+                def for_non_stream():
+                    return self.provider.ask(**ask_kwargs)
+
+                return for_stream() if stream else for_non_stream()
+
+            except Exception as e:
+                logging.debug(
+                    f"Failed to generate response using provider {provider_name} - {e}"
+                )
+
+        # g4f-based providers
+
+        for provider_info in TestProviders(timeout=self.timeout).get_results(
+            run=run_new_test
+        ):
+            if provider_info["name"] in self.exclude:
+                continue
+            try:
+                self.provider_name = f"g4f-{provider_info['name']}"
+                self.provider = GPT4FREE(
+                    provider=provider_info["name"],
+                    is_conversation=self.is_conversation,
+                    max_tokens=self.max_tokens,
+                    intro=self.intro,
+                    filepath=self.filepath,
+                    update_file=self.update_file,
+                    proxies=self.proxies,
+                    history_offset=self.history_offset,
+                    act=self.act,
+                )
+
+                def for_stream():
+                    for chunk in self.provider.ask(**ask_kwargs):
+                        yield chunk
+
+                def for_non_stream():
+                    return self.provider.ask(**ask_kwargs)
+
+                return for_stream() if stream else for_non_stream()
+
+            except Exception as e:
+                logging.debug(
+                    f"Failed to generate response using GPT4FREE-base provider {provider_name} - {e}"
+                )
+
+        raise AllProvidersFailure(
+            "None of the providers generated response successfully."
+        )
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        run_new_test: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+            run_new_test (bool, optional): Perform new test on g4f-based providers. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt,
+                True,
+                optimizer=optimizer,
+                conversationally=conversationally,
+                run_new_test=run_new_test,
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            ask_response = self.ask(
+                prompt,
+                False,
+                optimizer=optimizer,
+                conversationally=conversationally,
+                run_new_test=run_new_test,
+            )
+            return self.get_message(ask_response)
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert self.provider is not None, "Chat with AI first"
+        return self.provider.get_message(response)
+
+
+class AsyncAUTO(AsyncProvider):
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        exclude: list[str] = [],
+    ):
+        """Instantiates AsyncAUTO
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            exclude(list[str], optional): List of providers to be excluded. Defaults to [].
+        """
+        self.provider: Union[
+            AsyncOPENGPT,
+            AsyncKOBOLDAI,
+            AsyncPhindSearch,
+            AsyncLLAMA2,
+            AsyncBLACKBOXAI,
+            AsyncGPT4FREE,
+        ] = None
+        self.provider_name: str = None
+        self.is_conversation = is_conversation
+        self.max_tokens = max_tokens
+        self.timeout = timeout
+        self.intro = intro
+        self.filepath = filepath
+        self.update_file = update_file
+        self.proxies = proxies
+        self.history_offset = history_offset
+        self.act = act
+        self.exclude = exclude
+
+    @property
+    def last_response(self) -> dict[str, Any]:
+        return self.provider.last_response
+
+    @property
+    def conversation(self) -> object:
+        return self.provider.conversation
+
+    async def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        run_new_test: bool = False,
+    ) -> dict | AsyncGenerator:
+        """Chat with AI asynchronously.
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+            run_new_test (bool, optional): Perform new test on g4f-based providers. Defaults to False.
+        Returns:
+            dict|AsyncGenerator : ai response.
+        """
+        ask_kwargs: dict[str, Union[str, bool]] = {
+            "prompt": prompt,
+            "stream": stream,
+            "raw": raw,
+            "optimizer": optimizer,
+            "conversationally": conversationally,
+        }
+
+        # tgpt-based providers
+        for provider_name, provider_obj in async_provider_map.items():
+            if provider_name in self.exclude:
+                continue
+            try:
+                self.provider_name = f"tgpt-{provider_name}"
+                self.provider = provider_obj(
+                    is_conversation=self.is_conversation,
+                    max_tokens=self.max_tokens,
+                    timeout=self.timeout,
+                    intro=self.intro,
+                    filepath=self.filepath,
+                    update_file=self.update_file,
+                    proxies=self.proxies,
+                    history_offset=self.history_offset,
+                    act=self.act,
+                )
+
+                async def for_stream():
+                    async_ask = await self.provider.ask(**ask_kwargs)
+                    async for chunk in async_ask:
+                        yield chunk
+
+                async def for_non_stream():
+                    return await self.provider.ask(**ask_kwargs)
+
+                return for_stream() if stream else await for_non_stream()
+
+            except Exception as e:
+                logging.debug(
+                    f"Failed to generate response using provider {provider_name} - {e}"
+                )
+
+        # g4f-based providers
+
+        for provider_info in TestProviders(timeout=self.timeout).get_results(
+            run=run_new_test
+        ):
+            if provider_info["name"] in self.exclude:
+                continue
+            try:
+                self.provider_name = f"g4f-{provider_info['name']}"
+                self.provider = AsyncGPT4FREE(
+                    provider=provider_info["name"],
+                    is_conversation=self.is_conversation,
+                    max_tokens=self.max_tokens,
+                    intro=self.intro,
+                    filepath=self.filepath,
+                    update_file=self.update_file,
+                    proxies=self.proxies,
+                    history_offset=self.history_offset,
+                    act=self.act,
+                )
+
+                async def for_stream():
+                    async_ask = await self.provider.ask(**ask_kwargs)
+                    async for chunk in async_ask:
+                        yield chunk
+
+                async def for_non_stream():
+                    return await self.provider.ask(**ask_kwargs)
+
+                return for_stream() if stream else await for_non_stream()
+
+            except Exception as e:
+                logging.debug(
+                    f"Failed to generate response using GPT4FREE-base provider {provider_name} - {e}"
+                )
+
+        raise AllProvidersFailure(
+            "None of the providers generated response successfully."
+        )
+
+    async def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        run_new_test: bool = False,
+    ) -> str | AsyncGenerator:
+        """Generate response `str` asynchronously.
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+            run_new_test (bool, optional): Perform new test on g4f-based providers. Defaults to False.
+        Returns:
+            str|AsyncGenerator: Response generated
+        """
+
+        async def for_stream():
+            async_ask = await self.ask(
+                prompt,
+                True,
+                optimizer=optimizer,
+                conversationally=conversationally,
+                run_new_test=run_new_test,
+            )
+            async for response in async_ask:
+                yield await self.get_message(response)
+
+        async def for_non_stream():
+            ask_response = await self.ask(
+                prompt,
+                False,
+                optimizer=optimizer,
+                conversationally=conversationally,
+                run_new_test=run_new_test,
+            )
+            return await self.get_message(ask_response)
+
+        return for_stream() if stream else await for_non_stream()
+
+    async def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert self.provider is not None, "Chat with AI first"
+        return await self.provider.get_message(response)
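
For orientation, a minimal usage sketch of the new AUTO provider defined above (not part of the package diff; the prompt and the excluded provider name are illustrative):

    # Try providers in order until one answers, then report which one was used.
    from webscout.AIauto import AUTO

    bot = AUTO(exclude=["blackboxai"])      # optionally skip specific providers
    reply = bot.chat("What is the capital of France?")
    print(bot.provider_name)                # e.g. "tgpt-PhindSearch" or "g4f-<name>"
    print(reply)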

{webscout-1.4.5 → webscout-1.4.6}/webscout/__init__.py

@@ -11,7 +11,6 @@ from .DWEBS import DeepWEBS
 from .transcriber import transcriber
 from .voice import play_audio
 
-
 __repo__ = "https://github.com/OE-LUCIFER/Webscout"
 
 webai = [
@@ -30,7 +29,9 @@ webai = [
     "yepchat",
     "you",
     "xjai",
-    "thinkany"
+    "thinkany",
+    "auto",
+
 ]
 
 gpt4free_providers = [

{webscout-1.4.5 → webscout-1.4.6}/webscout/async_providers.py

@@ -7,7 +7,7 @@ from webscout.AI import AsyncLEO
 from webscout.AI import AsyncKOBOLDAI
 from webscout.AI import AsyncGROQ
 from webscout.AI import AsyncBLACKBOXAI
-from webscout.
+from webscout.g4f import AsyncGPT4FREE
 
 mapper: dict[str, object] = {
     "phind": AsyncPhindSearch,
@@ -21,13 +21,3 @@ mapper: dict[str, object] = {
     "groq": AsyncGROQ,
     "openai": AsyncOPENAI,
 }
-
-tgpt_mapper: dict[str, object] = {
-    "phind": AsyncPhindSearch,
-    "opengpt": AsyncOPENGPT,
-    "koboldai": AsyncKOBOLDAI,
-    # "gpt4free": AsyncGPT4FREE,
-    "blackboxai": AsyncBLACKBOXAI,
-    "llama2": AsyncLLAMA2,
-    "yepchat": AsyncYEPCHAT,
-}
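
AsyncAUTO (added in AIauto.py above) iterates this async provider mapper before falling back to the g4f providers. A hedged sketch of the async path, assuming a plain asyncio entry point (the prompt is illustrative):

    import asyncio

    from webscout.AIauto import AsyncAUTO

    async def main() -> None:
        bot = AsyncAUTO()
        # chat() awaits the first provider in async_provider_map that responds.
        reply = await bot.chat("Summarize what webscout does.")
        print(bot.provider_name, reply)

    asyncio.run(main())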

{webscout-1.4.5 → webscout-1.4.6}/webscout/exceptions.py

@@ -10,4 +10,9 @@ class TimeoutE(Exception):
     """Raised for timeout errors during API requests."""
 
 class FailedToGenerateResponseError(Exception):
+
     """Provider failed to fetch response"""
+class AllProvidersFailure(Exception):
+    """None of the providers generated response successfully"""
+
+    pass
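
AllProvidersFailure is the exception AUTO and AsyncAUTO raise when every provider in the chain fails. A short handling sketch (the prompt is illustrative):

    from webscout.AIauto import AUTO
    from webscout.exceptions import AllProvidersFailure

    try:
        print(AUTO().chat("hello"))
    except AllProvidersFailure:
        # Nothing answered; retry later or adjust the exclude list.
        print("All providers failed.")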

{webscout-1.4.5 → webscout-1.4.6}/webscout/webai.py

@@ -456,7 +456,20 @@ class Main(cmd.Cmd):
                 history_offset=history_offset,
                 act=awesome_prompt,
             )
+        if provider == "auto":
+            from webscout.AIauto import AUTO
 
+            self.bot = AUTO(
+                is_conversation=disable_conversation,
+                max_tokens=max_tokens,
+                timeout=timeout,
+                intro=intro,
+                filepath=filepath,
+                update_file=update_file,
+                proxies=proxies,
+                history_offset=history_offset,
+                act=awesome_prompt,
+            )
         elif provider == "opengpt":
             from webscout.AI import OPENGPT
 
@@ -470,6 +483,7 @@ class Main(cmd.Cmd):
                 proxies=proxies,
                 history_offset=history_offset,
                 act=awesome_prompt,
+                assistant_id="bca37014-6f97-4f2b-8928-81ea8d478d88"
             )
         elif provider == "thinkany":
             from webscout.AI import ThinkAnyAI
@@ -725,7 +739,13 @@ class Main(cmd.Cmd):
         self.__init_time = time.time()
         self.__start_time = time.time()
         self.__end_time = time.time()
-
+
+    @property
+    def get_provider(self):
+        if self.provider == "auto" and self.bot.provider_name is not None:
+            return self.bot.provider_name
+        else:
+            return self.provider
     @property
     def prompt(self):
         current_time = datetime.datetime.now().strftime("%H:%M:%S")
@@ -740,7 +760,7 @@ class Main(cmd.Cmd):
         if not self.disable_coloring:
             cmd_prompt = (
                 f"╭─[`{Fore.GREEN}{getpass.getuser().capitalize()}@webai]`"
-                f"(`{Fore.YELLOW}{self.
+                f"(`{Fore.YELLOW}{self.get_provider})`"
                 f"~[`{Fore.LIGHTWHITE_EX}⏰{Fore.MAGENTA}{current_time}-`"
                 f"{Fore.LIGHTWHITE_EX}💻{Fore.BLUE}{find_range(self.__init_time, time.time(), True)}-`"
                 f"{Fore.LIGHTWHITE_EX}⚡️{Fore.RED}{find_range(self.__start_time, self.__end_time)}s]`"
@@ -753,7 +773,7 @@ class Main(cmd.Cmd):
 
         else:
             return (
-                f"╭─[{getpass.getuser().capitalize()}@webscout]({self.
+                f"╭─[{getpass.getuser().capitalize()}@webscout]({self.get_provider})"
                 f"~[⏰{current_time}"
                 f"-💻{find_range(self.__init_time, time.time(), True)}"
                 f"-⚡️{find_range(self.__start_time, self.__end_time)}s]"
@@ -1125,7 +1145,7 @@ class Main(cmd.Cmd):
             busy_bar.stop_spinning()
             this.stream_output(
                 generated_response,
-                title="
+                title="Webscout",
                 is_markdown=self.prettify,
                 style=Style(
                     color=self.color,

{webscout-1.4.5 → webscout-1.4.6}/webscout/webscout_search.py

@@ -4,12 +4,6 @@ from threading import Thread
 import sys
 from types import TracebackType
 from typing import Any, Awaitable, Dict, Optional, Type, Union
-if sys.platform == 'win32':
-    try:
-        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
-    except AttributeError:
-        # If WindowsSelectorEventLoopPolicy is not available, do nothing
-        pass
 from .webscout_search_async import AsyncWEBS
 
 
@@ -24,7 +18,7 @@ class WEBS(AsyncWEBS):
         proxies: Union[Dict[str, str], str, None] = None, # deprecated
         timeout: Optional[int] = 10,
     ) -> None:
-        """Initialize the
+        """Initialize the WEBS object.
 
         Args:
             headers (dict, optional): Dictionary of headers for the HTTP client. Defaults to None.
@@ -81,4 +75,4 @@ class WEBS(AsyncWEBS):
         return self._run_async_in_thread(super().maps(*args, **kwargs))
 
     def translate(self, *args: Any, **kwargs: Any) -> Any:
-        return self._run_async_in_thread(super().translate(*args, **kwargs))
+        return self._run_async_in_thread(super().translate(*args, **kwargs))

{webscout-1.4.5 → webscout-1.4.6}/webscout.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 1.4.5
+Version: 1.4.6
 Summary: Search for anything using the Google, DuckDuckGo, phind.com. Also containes AI models, can transcribe yt videos, temporary email and phone number generation, have TTS support and webai(terminal gpt and open interpeter)
 Author: OEvortex
 Author-email: helpingai5@gmail.com
@@ -24,14 +24,14 @@ Classifier: Topic :: Internet :: WWW/HTTP :: Indexing/Search
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Description-Content-Type: text/markdown
 License-File: LICENSE.md
-Requires-Dist: docstring_inheritance
-Requires-Dist: click
-Requires-Dist: curl_cffi
-Requires-Dist: lxml
-Requires-Dist: nest-asyncio
-Requires-Dist: selenium
-Requires-Dist: tqdm
-Requires-Dist: webdriver-manager
+Requires-Dist: docstring_inheritance
+Requires-Dist: click
+Requires-Dist: curl_cffi
+Requires-Dist: lxml
+Requires-Dist: nest-asyncio
+Requires-Dist: selenium
+Requires-Dist: tqdm
+Requires-Dist: webdriver-manager
 Requires-Dist: halo>=0.0.31
 Requires-Dist: g4f>=0.2.2.3
 Requires-Dist: rich

{webscout-1.4.5 → webscout-1.4.6}/webscout.egg-info/requires.txt

@@ -1,11 +1,11 @@
-docstring_inheritance
-click
-curl_cffi
-lxml
-nest-asyncio
-selenium
-tqdm
-webdriver-manager
+docstring_inheritance
+click
+curl_cffi
+lxml
+nest-asyncio
+selenium
+tqdm
+webdriver-manager
 halo>=0.0.31
 g4f>=0.2.2.3
 rich

All other files listed above are unchanged between webscout-1.4.5 and webscout-1.4.6.