webscout 1.3.8__py3-none-any.whl → 1.3.9__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AI.py +3132 -896
- webscout/AIbase.py +69 -1
- webscout/AIutel.py +19 -2
- webscout/__init__.py +2 -2
- webscout/async_providers.py +33 -0
- webscout/exceptions.py +4 -1
- webscout/g4f.py +193 -1
- webscout/version.py +1 -1
- webscout/webai.py +20 -2
- {webscout-1.3.8.dist-info → webscout-1.3.9.dist-info}/METADATA +7 -19
- {webscout-1.3.8.dist-info → webscout-1.3.9.dist-info}/RECORD +15 -14
- {webscout-1.3.8.dist-info → webscout-1.3.9.dist-info}/LICENSE.md +0 -0
- {webscout-1.3.8.dist-info → webscout-1.3.9.dist-info}/WHEEL +0 -0
- {webscout-1.3.8.dist-info → webscout-1.3.9.dist-info}/entry_points.txt +0 -0
- {webscout-1.3.8.dist-info → webscout-1.3.9.dist-info}/top_level.txt +0 -0
webscout/AIbase.py
CHANGED

@@ -3,7 +3,7 @@ from abc import abstractmethod
 
 
 class Provider(ABC):
-    """Base class for
+    """Base class for providers"""
 
     @abstractmethod
     def ask(
@@ -61,6 +61,74 @@ class Provider(ABC):
     def get_message(self, response: dict) -> str:
         """Retrieves message only from response
 
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        raise NotImplementedError("Method needs to be implemented in subclass")
+
+
+class AsyncProvider(ABC):
+    """Asynchronous base class for providers"""
+
+    @abstractmethod
+    async def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Asynchronously chat with AI
+
+        Args:
+            prompt (str): Prompt to be sent
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "completion": "\nNext: domestic cat breeds with short hair >>",
+            "stop_reason": null,
+            "truncated": false,
+            "stop": null,
+            "model": "llama-2-13b-chat",
+            "log_id": "cmpl-3kYiYxSNDvgMShSzFooz6t",
+            "exception": null
+        }
+        ```
+        """
+        raise NotImplementedError("Method needs to be implemented in subclass")
+
+    @abstractmethod
+    async def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Asynchronously generate response `str`
+        Args:
+            prompt (str): Prompt to be sent
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+        raise NotImplementedError("Method needs to be implemented in subclass")
+
+    @abstractmethod
+    async def get_message(self, response: dict) -> str:
+        """Asynchronously retrieves message only from response
+
         Args:
             response (dict): Response generated by `self.ask`
 
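The new `AsyncProvider` ABC mirrors the synchronous `Provider`, declaring coroutine versions of `ask`, `chat`, and `get_message`. A minimal sketch of a conforming subclass, assuming webscout 1.3.9 is installed; `EchoAI` and its canned reply are hypothetical, for illustration only:

```python
import asyncio

from webscout.AIbase import AsyncProvider


class EchoAI(AsyncProvider):
    """Hypothetical provider used only to illustrate the new interface."""

    async def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> dict:
        # A real provider would make an asynchronous HTTP call here.
        return {"text": f"echo: {prompt}"}

    async def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str:
        return await self.get_message(await self.ask(prompt))

    async def get_message(self, response: dict) -> str:
        return response["text"]


print(asyncio.run(EchoAI().chat("hello")))  # echo: hello
```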
webscout/AIutel.py
CHANGED

@@ -34,10 +34,10 @@ webai = [
     "blackboxai",
     "g4fauto",
     "perplexity",
-    "sean",
     "groq",
     "reka",
-    "cohere"
+    "cohere",
+    "yepchat",
 ]
 
 gpt4free_providers = [
@@ -45,7 +45,24 @@ gpt4free_providers = [
 ]
 
 available_providers = webai + gpt4free_providers
+def sanitize_stream(
+    chunk: str, intro_value: str = "data:", to_json: bool = True
+) -> str | dict:
+    """Remove streaming flags
 
+    Args:
+        chunk (str): Streamig chunk.
+        intro_value (str, optional): streaming flag. Defaults to "data:".
+        to_json (bool, optional). Return chunk as dictionary. Defaults to True.
+
+    Returns:
+        str: Sanitized streaming value.
+    """
+
+    if chunk.startswith(intro_value):
+        chunk = chunk[len(intro_value) :]
+
+    return json.loads(chunk) if to_json else chunk
 def run_system_command(
     command: str,
     exit_on_error: bool = True,
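The new `sanitize_stream` helper strips a leading streaming flag (the server-sent-events `data:` prefix by default) and optionally JSON-decodes the remainder. A quick sketch of its behavior, assuming webscout 1.3.9:

```python
from webscout.AIutel import sanitize_stream

# A typical server-sent-events line from a streaming endpoint.
line = 'data: {"token": "Hello"}'

# Default: the "data:" prefix is removed and the rest is json.loads-ed
# (json.loads tolerates the leading space).
print(sanitize_stream(line))                 # {'token': 'Hello'}

# to_json=False returns the trimmed string untouched.
print(sanitize_stream(line, to_json=False))  # ' {"token": "Hello"}'
```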
webscout/async_providers.py
ADDED

@@ -0,0 +1,33 @@
+from webscout.AI import AsyncPhindSearch
+from webscout.AI import AsyncYEPCHAT
+from webscout.AI import AsyncOPENGPT
+from webscout.AI import AsyncOPENAI
+from webscout.AI import AsyncLLAMA2
+from webscout.AI import AsyncLEO
+from webscout.AI import AsyncKOBOLDAI
+from webscout.AI import AsyncGROQ
+from webscout.AI import AsyncBLACKBOXAI
+from webscout.AI import AsyncGPT4FREE
+
+mapper: dict[str, object] = {
+    "phind": AsyncPhindSearch,
+    "opengpt": AsyncOPENGPT,
+    "koboldai": AsyncKOBOLDAI,
+    "blackboxai": AsyncBLACKBOXAI,
+    "gpt4free": AsyncGPT4FREE,
+    "llama2": AsyncLLAMA2,
+    "yepchat": AsyncYEPCHAT,
+    "leo": AsyncLEO,
+    "groq": AsyncGROQ,
+    "openai": AsyncOPENAI,
+}
+
+tgpt_mapper: dict[str, object] = {
+    "phind": AsyncPhindSearch,
+    "opengpt": AsyncOPENGPT,
+    "koboldai": AsyncKOBOLDAI,
+    # "gpt4free": AsyncGPT4FREE,
+    "blackboxai": AsyncBLACKBOXAI,
+    "llama2": AsyncLLAMA2,
+    "yepchat": AsyncYEPCHAT,
+}
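The `mapper` dict lets callers resolve an async provider class by name. A usage sketch; it assumes network access and that the chosen provider's constructor defaults require no authentication:

```python
import asyncio

from webscout.async_providers import mapper


async def main() -> None:
    # Look up a provider class by name, then use the AsyncProvider interface.
    bot = mapper["phind"]()  # AsyncPhindSearch, constructed with its defaults
    print(await bot.chat("What is webscout?"))


asyncio.run(main())
```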
webscout/exceptions.py
CHANGED

@@ -7,4 +7,7 @@ class RatelimitE(Exception):
 
 
 class TimeoutE(Exception):
-    """Raised for timeout errors during API requests."""
+    """Raised for timeout errors during API requests."""
+
+class FailedToGenerateResponseError(Exception):
+    """Provider failed to fetch response"""
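The new `FailedToGenerateResponseError` gives callers a dedicated exception to catch when a provider cannot fetch a completion. A hedged sketch; which calls actually raise it is up to each provider implementation:

```python
from webscout.exceptions import FailedToGenerateResponseError


def safe_chat(bot, prompt: str) -> str:
    # Catch the new exception so a single failed provider call
    # does not take down the surrounding application.
    try:
        return bot.chat(prompt)
    except FailedToGenerateResponseError as error:
        return f"provider failed: {error}"
```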
webscout/g4f.py
CHANGED

@@ -2,7 +2,7 @@ import g4f
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
-from webscout.AIbase import Provider
+from webscout.AIbase import Provider, AsyncProvider
 from webscout.AIutel import available_providers
 
 
@@ -26,7 +26,199 @@ default_models = {
 
 default_provider = "Koala"
 
+class AsyncGPT4FREE(AsyncProvider):
+    def __init__(
+        self,
+        provider: str = default_provider,
+        is_conversation: bool = True,
+        auth: str = None,
+        max_tokens: int = 600,
+        model: str = None,
+        ignore_working: bool = False,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ):
+        """Initialies GPT4FREE
+
+        Args:
+            provider (str, optional): gpt4free based provider name. Defaults to Koala.
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            auth (str, optional): Authentication value for the provider incase it needs. Defaults to None.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            model (str, optional): LLM model name. Defaults to text-davinci-003|gpt-3.5-turbo.
+            ignore_working (bool, optional): Ignore working status of the provider. Defaults to False.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+        """
+        assert provider in available_providers, (
+            f"Provider '{provider}' is not yet supported. "
+            f"Try others like {', '.join(available_providers)}"
+        )
+        if model is None:
+            model = default_models["chat_completion"]
+
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation,
+            self.max_tokens_to_sample,
+            filepath,
+            update_file,
+        )
+        self.conversation.history_offset = history_offset
+        self.model = model
+        self.provider = provider
+        self.ignore_working = ignore_working
+        self.auth = auth
+        self.proxy = None if not proxies else list(proxies.values())[0]
+
+    def __str__(self):
+        return f"AsyncGPTFREE(provider={self.provider})"
+
+    async def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict | AsyncGenerator:
+        """Chat with AI asynchronously.
 
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict|AsyncGenerator : ai content
+        ```json
+        {
+            "text" : "How may I help you today?"
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = dict(
+            model=self.model,
+            provider=self.provider,  # g4f.Provider.Aichat,
+            messages=[{"role": "user", "content": conversation_prompt}],
+            stream=True,
+            ignore_working=self.ignore_working,
+            auth=self.auth,
+            proxy=self.proxy,
+            timeout=self.timeout,
+        )
+
+        async def format_response(response):
+            return dict(text=response)
+
+        async def for_stream():
+            previous_chunks = ""
+            response = g4f.ChatCompletion.create_async(**payload)
+
+            async for chunk in response:
+                previous_chunks += chunk
+                formatted_resp = await format_response(previous_chunks)
+                self.last_response.update(formatted_resp)
+                yield previous_chunks if raw else formatted_resp
+
+            self.conversation.update_chat_history(
+                prompt,
+                previous_chunks,
+            )
+
+        async def for_non_stream():
+            async for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else await for_non_stream()
+
+    async def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict | AsyncGenerator:
+        """Generate response `str` asynchronously.
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str|AsyncGenerator: Response generated
+        """
+
+        async def for_stream():
+            async_ask = await self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            )
+            async for response in async_ask:
+                yield await self.get_message(response)
+
+        async def for_non_stream():
+            return await self.get_message(
+                await self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else await for_non_stream()
+
+    async def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
 class GPT4FREE(Provider):
     def __init__(
         self,
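`AsyncGPT4FREE` wraps `g4f.ChatCompletion.create_async` behind the new `AsyncProvider` interface: awaiting `chat()` returns the full response string, while `chat(stream=True)` resolves to an async generator that yields the accumulated text after each chunk. A usage sketch, assuming network access and a working upstream gpt4free provider:

```python
import asyncio

from webscout.g4f import AsyncGPT4FREE


async def main() -> None:
    bot = AsyncGPT4FREE(timeout=30)  # default provider is "Koala"

    # Non-streaming: awaiting chat() returns the complete response string.
    print(await bot.chat("Say hello"))

    # Streaming: chat(stream=True) resolves to an async generator; each
    # yielded value is the text accumulated so far, not a single delta.
    generator = await bot.chat("Count to five", stream=True)
    async for text_so_far in generator:
        print(text_so_far)


asyncio.run(main())
```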
webscout/version.py
CHANGED

@@ -1,2 +1,2 @@
-__version__ = "1.3.8"
+__version__ = "1.3.9"
 
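A quick way to confirm which release is installed; `webscout.__version__` is the same attribute the CLI's `--version` option reads in the webai.py hunk below:

```python
import webscout

print(webscout.__version__)  # "1.3.9" for this release
```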
webscout/webai.py
CHANGED

@@ -35,7 +35,6 @@ from colorama import Fore
 from colorama import init as init_colorama
 from dotenv import load_dotenv
 import g4f
-
 import webscout.AIutel
 
 init_colorama(autoreset=True)
@@ -472,6 +471,25 @@ class Main(cmd.Cmd):
                 history_offset=history_offset,
                 act=awesome_prompt,
             )
+        elif provider == "yepchat":
+            from webscout.AI import YEPCHAT
+
+            self.bot = YEPCHAT(
+                is_conversation=disable_conversation,
+                max_tokens=max_tokens,
+                temperature=temperature,
+                presence_penalty=top_p,
+                frequency_penalty=top_k,
+                top_p=top_p,
+                model=getOr(model, "Mixtral-8x7B-Instruct-v0.1"),
+                timeout=timeout,
+                intro=intro,
+                filepath=filepath,
+                update_file=update_file,
+                proxies=proxies,
+                history_offset=history_offset,
+                act=awesome_prompt,
+            )
         elif provider == "groq":
             assert auth, (
                 "GROQ's API-key is required. " "Use the flag `--key` or `-k`"
@@ -1135,7 +1153,7 @@ class EntryGroup:
     # @staticmethod
     @click.group()
     @click.version_option(
-        webscout.__version__, "-v", "--version", package_name="
+        webscout.__version__, "-v", "--version", package_name="webscout"
     )
     @click.help_option("-h", "--help")
     def webai_():
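For reference, the `YEPCHAT` provider the CLI wires up above can also be used directly. A sketch with illustrative arguments; the constructor defaults and the synchronous `chat` helper are assumed from the README's other providers:

```python
from webscout.AI import YEPCHAT

# Illustrative values only; these mirror the arguments the CLI passes above.
bot = YEPCHAT(model="Mixtral-8x7B-Instruct-v0.1", timeout=30)
print(bot.chat("Hello"))
```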
{webscout-1.3.8.dist-info → webscout-1.3.9.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 1.3.8
+Version: 1.3.9
 Summary: Search for anything using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models, can transcribe yt videos, have TTS support and now has webai(terminal gpt and open interpeter) support
 Author: OEvortex
 Author-email: helpingai5@gmail.com
@@ -47,6 +47,7 @@ Requires-Dist: tiktoken
 Requires-Dist: tldextract
 Requires-Dist: orjson
 Requires-Dist: PyYAML
+Requires-Dist: appdirsGoogleBard1 >=2.1.4
 Provides-Extra: dev
 Requires-Dist: ruff >=0.1.6 ; extra == 'dev'
 Requires-Dist: pytest >=7.4.2 ; extra == 'dev'
@@ -97,10 +98,8 @@ Search for anything using the Google, DuckDuckGo.com, yep.com, phind.com, you.co
 - [7. `PERPLEXITY` - Search With PERPLEXITY](#7-perplexity---search-with-perplexity)
 - [8. `OpenGPT` - chat With OPENGPT](#8-opengpt---chat-with-opengpt)
 - [9. `KOBOLDIA` -](#9-koboldia--)
-- [10. `
-- [11. `
-- [12. `Cohere` - chat with cohere](#12-cohere---chat-with-cohere)
-- [usage of special .LLM file from webscout (webscout.LLM)](#usage-of-special-llm-file-from-webscout-webscoutllm)
+- [10. `Reka` - chat with reka](#10-reka---chat-with-reka)
+- [11. `Cohere` - chat with cohere](#11-cohere---chat-with-cohere)
 - [`LLM`](#llm)
 - [`LLM` with internet](#llm-with-internet)
 - [`Webai` - terminal gpt and a open interpeter](#webai---terminal-gpt-and-a-open-interpeter)
@@ -688,19 +687,9 @@ response = koboldai.ask(prompt)
 message = koboldai.get_message(response)
 print(message)
 
-```
-### 10. `Sean` - chat With Sean
-```python
-from webscout.AI import Sean
-
-a = Sean(is_conversation=True, max_tokens=8000, timeout=30)
-# This example sends a simple greeting and prints the response
-prompt = "tell me about india"
-response_str = a.chat(prompt)
-print(response_str)
 ```
 
-###
+### 10. `Reka` - chat with reka
 ```python
 from webscout.AI import REKA
 
@@ -711,7 +700,7 @@ response_str = a.chat(prompt)
 print(response_str)
 ```
 
-###
+### 11. `Cohere` - chat with cohere
 ```python
 from webscout.AI import Cohere
 
@@ -721,7 +710,6 @@ prompt = "tell me about india"
 response_str = a.chat(prompt)
 print(response_str)
 ```
-## usage of special .LLM file from webscout (webscout.LLM)
 
 ### `LLM`
 ```python
@@ -844,7 +832,7 @@ def use_rawdog_with_webai(prompt):
     top_k=40,
     top_p=0.95,
     model="command-r-plus", # Replace with your desired model
-    auth=
+    auth=None, # Replace with your auth key/value (if needed)
     timeout=30,
     disable_conversation=True,
     filepath=None,
{webscout-1.3.8.dist-info → webscout-1.3.9.dist-info}/RECORD
CHANGED

@@ -10,27 +10,28 @@ DeepWEBS/networks/webpage_fetcher.py,sha256=vRB9T3o-nMgrMkG2NPHTDctNeXaPSKCmBXqu
 DeepWEBS/utilsdw/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 DeepWEBS/utilsdw/enver.py,sha256=vpI7s4_o_VL9govSryOv-z1zYK3pTEW3-H9QNN8JYtc,2472
 DeepWEBS/utilsdw/logger.py,sha256=Z0nFUcEGyU8r28yKiIyvEtO26xxpmJgbvNToTfwZecc,8174
-webscout/AI.py,sha256=
-webscout/AIbase.py,sha256=
-webscout/AIutel.py,sha256=
+webscout/AI.py,sha256=QR5JKS2WFgQZt9VmZ1Ywqwu19NfV5D5ycJQ3auLE2VI,202197
+webscout/AIbase.py,sha256=GoHbN8r0gq2saYRZv6LA-Fr9Jlcjv80STKFXUq2ZeGU,4710
+webscout/AIutel.py,sha256=nGzO4T6b7YuxOQigtjNsUBESmDKlk3_CvbIfDdd2KKo,33135
 webscout/DWEBS.py,sha256=QT-7-dUgWhQ_H7EVZD53AVyXxyskoPMKCkFIpzkN56Q,7332
 webscout/LLM.py,sha256=CiDz0okZNEoXuxMwadZnwRGSLpqk2zg0vzvXSxQZjcE,1910
-webscout/__init__.py,sha256=
+webscout/__init__.py,sha256=64KcNfVPc0lGnhjom7aKgjOJF2AYL6KB3y-b8G4C1N0,1046
 webscout/__main__.py,sha256=ZtTRgsRjUi2JOvYFLF1ZCh55Sdoz94I-BS-TlJC7WDU,126
+webscout/async_providers.py,sha256=wQWmUlJT5HHKYoN7DMtaMJjzwnfgw8rXFZPyGi97c5o,939
 webscout/cli.py,sha256=F888fdrFUQgczMBN4yMOSf6Nh-IbvkqpPhDsbnA2FtQ,17059
-webscout/exceptions.py,sha256=
-webscout/g4f.py,sha256=
+webscout/exceptions.py,sha256=e4hJnOEAiYuA6BTsMgv4R-vOq0Tt3f9ba0ROTNtPDl4,378
+webscout/g4f.py,sha256=Npxf7YI0eFMxizD9VOI5cE0h4YTbHqgW2WzxVtv2jno,24451
 webscout/models.py,sha256=5iQIdtedT18YuTZ3npoG7kLMwcrKwhQ7928dl_7qZW0,692
 webscout/transcriber.py,sha256=EddvTSq7dPJ42V3pQVnGuEiYQ7WjJ9uyeR9kMSxN7uY,20622
 webscout/utils.py,sha256=c_98M4oqpb54pUun3fpGGlCerFD6ZHUbghyp5b7Mwgo,2605
-webscout/version.py,sha256=
+webscout/version.py,sha256=0_JbgTd_VYa7isxUGg7sGu-1HYYtsZzU2SomLqJXvec,25
 webscout/voice.py,sha256=0QjXTHAQmCK07IDZXRc7JXem47cnPJH7u3X0sVP1-UQ,967
-webscout/webai.py,sha256=
+webscout/webai.py,sha256=FQQlTmTsl3V__7V9_jyG-CaggSaDgBr_8XeJOaMXITE,81661
 webscout/webscout_search.py,sha256=3_lli-hDb8_kCGwscK29xuUcOS833ROgpNhDzrxh0dk,3085
 webscout/webscout_search_async.py,sha256=Y5frH0k3hLqBCR-8dn7a_b7EvxdYxn6wHiKl3jWosE0,40670
-webscout-1.3.
-webscout-1.3.
-webscout-1.3.
-webscout-1.3.
-webscout-1.3.
-webscout-1.3.
+webscout-1.3.9.dist-info/LICENSE.md,sha256=mRVwJuT4SXC5O93BFdsfWBjlXjGn2Np90Zm5SocUzM0,3150
+webscout-1.3.9.dist-info/METADATA,sha256=xH4eufoqSRcVb5aKb8ckgKddBIFxLRhBoVgW4Iuw44I,31964
+webscout-1.3.9.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+webscout-1.3.9.dist-info/entry_points.txt,sha256=8-93eRslYrzTHs5E-6yFRJrve00C9q-SkXJD113jzRY,197
+webscout-1.3.9.dist-info/top_level.txt,sha256=OD5YKy6Y3hldL7SmuxsiEDxAG4LgdSSWwzYk22MF9fk,18
+webscout-1.3.9.dist-info/RECORD,,

{webscout-1.3.8.dist-info → webscout-1.3.9.dist-info}/LICENSE.md
File without changes

{webscout-1.3.8.dist-info → webscout-1.3.9.dist-info}/WHEEL
File without changes

{webscout-1.3.8.dist-info → webscout-1.3.9.dist-info}/entry_points.txt
File without changes

{webscout-1.3.8.dist-info → webscout-1.3.9.dist-info}/top_level.txt
File without changes