webscout 1.3.8__py3-none-any.whl → 1.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AI.py +3403 -940
- webscout/AIbase.py +69 -1
- webscout/AIutel.py +19 -2
- webscout/__init__.py +2 -2
- webscout/async_providers.py +33 -0
- webscout/exceptions.py +4 -1
- webscout/g4f.py +193 -1
- webscout/version.py +1 -1
- webscout/webai.py +20 -2
- {webscout-1.3.8.dist-info → webscout-1.4.0.dist-info}/METADATA +65 -58
- {webscout-1.3.8.dist-info → webscout-1.4.0.dist-info}/RECORD +15 -14
- {webscout-1.3.8.dist-info → webscout-1.4.0.dist-info}/LICENSE.md +0 -0
- {webscout-1.3.8.dist-info → webscout-1.4.0.dist-info}/WHEEL +0 -0
- {webscout-1.3.8.dist-info → webscout-1.4.0.dist-info}/entry_points.txt +0 -0
- {webscout-1.3.8.dist-info → webscout-1.4.0.dist-info}/top_level.txt +0 -0
webscout/AIbase.py
CHANGED
@@ -3,7 +3,7 @@ from abc import abstractmethod
 
 
 class Provider(ABC):
-    """Base class for
+    """Base class for providers"""
 
     @abstractmethod
     def ask(
@@ -61,6 +61,74 @@ class Provider(ABC):
     def get_message(self, response: dict) -> str:
         """Retrieves message only from response
 
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        raise NotImplementedError("Method needs to be implemented in subclass")
+
+
+class AsyncProvider(ABC):
+    """Asynchronous base class for providers"""
+
+    @abstractmethod
+    async def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Asynchronously chat with AI
+
+        Args:
+            prompt (str): Prompt to be sent
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "completion": "\nNext: domestic cat breeds with short hair >>",
+            "stop_reason": null,
+            "truncated": false,
+            "stop": null,
+            "model": "llama-2-13b-chat",
+            "log_id": "cmpl-3kYiYxSNDvgMShSzFooz6t",
+            "exception": null
+        }
+        ```
+        """
+        raise NotImplementedError("Method needs to be implemented in subclass")
+
+    @abstractmethod
+    async def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Asynchronously generate response `str`
+        Args:
+            prompt (str): Prompt to be sent
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+        raise NotImplementedError("Method needs to be implemented in subclass")
+
+    @abstractmethod
+    async def get_message(self, response: dict) -> str:
+        """Asynchronously retrieves message only from response
+
         Args:
             response (dict): Response generated by `self.ask`
 
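For orientation, here is a minimal sketch of how the new `AsyncProvider` ABC introduced above might be subclassed. The `EchoProvider` class and its trivial method bodies are hypothetical and only illustrate the required coroutine signatures; they are not part of the package.

```python
import asyncio

from webscout.AIbase import AsyncProvider  # requires webscout >= 1.4.0


class EchoProvider(AsyncProvider):
    """Toy provider that just echoes the prompt back (illustrative only)."""

    async def ask(self, prompt, stream=False, raw=False, optimizer=None, conversationally=False) -> dict:
        # A real provider would call a remote API here.
        return {"text": prompt}

    async def chat(self, prompt, stream=False, optimizer=None, conversationally=False) -> str:
        return await self.get_message(await self.ask(prompt))

    async def get_message(self, response: dict) -> str:
        return response["text"]


print(asyncio.run(EchoProvider().chat("hello")))  # -> "hello"
```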
webscout/AIutel.py
CHANGED
@@ -34,10 +34,10 @@ webai = [
     "blackboxai",
     "g4fauto",
     "perplexity",
-    "sean",
     "groq",
     "reka",
-    "cohere"
+    "cohere",
+    "yepchat",
 ]
 
 gpt4free_providers = [
@@ -45,7 +45,24 @@ gpt4free_providers = [
 ]
 
 available_providers = webai + gpt4free_providers
+def sanitize_stream(
+    chunk: str, intro_value: str = "data:", to_json: bool = True
+) -> str | dict:
+    """Remove streaming flags
 
+    Args:
+        chunk (str): Streamig chunk.
+        intro_value (str, optional): streaming flag. Defaults to "data:".
+        to_json (bool, optional). Return chunk as dictionary. Defaults to True.
+
+    Returns:
+        str: Sanitized streaming value.
+    """
+
+    if chunk.startswith(intro_value):
+        chunk = chunk[len(intro_value) :]
+
+    return json.loads(chunk) if to_json else chunk
 def run_system_command(
     command: str,
     exit_on_error: bool = True,
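As a quick illustration of the new `sanitize_stream` helper added above, a short hedged example (assuming webscout 1.4.0 with this AIutel change installed):

```python
from webscout.AIutel import sanitize_stream

# A typical server-sent-events style line, prefixed with the default "data:" flag
line = 'data:{"text": "Hello"}'

print(sanitize_stream(line))                 # -> {'text': 'Hello'} (prefix stripped, parsed with json.loads)
print(sanitize_stream(line, to_json=False))  # -> '{"text": "Hello"}' (prefix stripped only)
```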
webscout/async_providers.py
CHANGED
@@ -0,0 +1,33 @@
+from webscout.AI import AsyncPhindSearch
+from webscout.AI import AsyncYEPCHAT
+from webscout.AI import AsyncOPENGPT
+from webscout.AI import AsyncOPENAI
+from webscout.AI import AsyncLLAMA2
+from webscout.AI import AsyncLEO
+from webscout.AI import AsyncKOBOLDAI
+from webscout.AI import AsyncGROQ
+from webscout.AI import AsyncBLACKBOXAI
+from webscout.AI import AsyncGPT4FREE
+
+mapper: dict[str, object] = {
+    "phind": AsyncPhindSearch,
+    "opengpt": AsyncOPENGPT,
+    "koboldai": AsyncKOBOLDAI,
+    "blackboxai": AsyncBLACKBOXAI,
+    "gpt4free": AsyncGPT4FREE,
+    "llama2": AsyncLLAMA2,
+    "yepchat": AsyncYEPCHAT,
+    "leo": AsyncLEO,
+    "groq": AsyncGROQ,
+    "openai": AsyncOPENAI,
+}
+
+tgpt_mapper: dict[str, object] = {
+    "phind": AsyncPhindSearch,
+    "opengpt": AsyncOPENGPT,
+    "koboldai": AsyncKOBOLDAI,
+    # "gpt4free": AsyncGPT4FREE,
+    "blackboxai": AsyncBLACKBOXAI,
+    "llama2": AsyncLLAMA2,
+    "yepchat": AsyncYEPCHAT,
+}
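The `mapper` dict above lets callers resolve an async provider class from its short name. A hedged usage sketch follows; the `webscout.async_providers` import path is assumed from the file list at the top of this diff, and the prompt is only an example.

```python
import asyncio

from webscout.async_providers import mapper  # assumed import path for the new module


async def main() -> None:
    provider_cls = mapper["phind"]  # resolves to AsyncPhindSearch
    bot = provider_cls()            # default constructor arguments assumed
    print(await bot.chat("Say hello in one short sentence"))


asyncio.run(main())
```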
webscout/exceptions.py
CHANGED
@@ -7,4 +7,7 @@ class RatelimitE(Exception):
 
 
 class TimeoutE(Exception):
-    """Raised for timeout errors during API requests."""
+    """Raised for timeout errors during API requests."""
+
+class FailedToGenerateResponseError(Exception):
+    """Provider failed to fetch response"""
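The new `FailedToGenerateResponseError` gives callers a dedicated exception type for provider failures. An illustrative, unverified sketch of how it might be caught, assuming a provider such as `PhindSearch` raises it when it cannot fetch a reply:

```python
from webscout.AI import PhindSearch
from webscout.exceptions import FailedToGenerateResponseError

ph = PhindSearch()  # default settings assumed
try:
    print(ph.chat("Summarise today's top tech headline in one line"))
except FailedToGenerateResponseError as error:
    # Raised when the provider fails to fetch a response (per this release's docstring)
    print(f"Provider failed: {error}")
```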
webscout/g4f.py
CHANGED
@@ -2,7 +2,7 @@ import g4f
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
-from webscout.AIbase import Provider
+from webscout.AIbase import Provider, AsyncProvider
 from webscout.AIutel import available_providers
 
 
@@ -26,7 +26,199 @@ default_models = {
 
 default_provider = "Koala"
 
+class AsyncGPT4FREE(AsyncProvider):
+    def __init__(
+        self,
+        provider: str = default_provider,
+        is_conversation: bool = True,
+        auth: str = None,
+        max_tokens: int = 600,
+        model: str = None,
+        ignore_working: bool = False,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ):
+        """Initialies GPT4FREE
+
+        Args:
+            provider (str, optional): gpt4free based provider name. Defaults to Koala.
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            auth (str, optional): Authentication value for the provider incase it needs. Defaults to None.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            model (str, optional): LLM model name. Defaults to text-davinci-003|gpt-3.5-turbo.
+            ignore_working (bool, optional): Ignore working status of the provider. Defaults to False.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+        """
+        assert provider in available_providers, (
+            f"Provider '{provider}' is not yet supported. "
+            f"Try others like {', '.join(available_providers)}"
+        )
+        if model is None:
+            model = default_models["chat_completion"]
+
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation,
+            self.max_tokens_to_sample,
+            filepath,
+            update_file,
+        )
+        self.conversation.history_offset = history_offset
+        self.model = model
+        self.provider = provider
+        self.ignore_working = ignore_working
+        self.auth = auth
+        self.proxy = None if not proxies else list(proxies.values())[0]
+
+    def __str__(self):
+        return f"AsyncGPTFREE(provider={self.provider})"
+
+    async def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict | AsyncGenerator:
+        """Chat with AI asynchronously.
 
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict|AsyncGenerator : ai content
+        ```json
+        {
+            "text" : "How may I help you today?"
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = dict(
+            model=self.model,
+            provider=self.provider,  # g4f.Provider.Aichat,
+            messages=[{"role": "user", "content": conversation_prompt}],
+            stream=True,
+            ignore_working=self.ignore_working,
+            auth=self.auth,
+            proxy=self.proxy,
+            timeout=self.timeout,
+        )
+
+        async def format_response(response):
+            return dict(text=response)
+
+        async def for_stream():
+            previous_chunks = ""
+            response = g4f.ChatCompletion.create_async(**payload)
+
+            async for chunk in response:
+                previous_chunks += chunk
+                formatted_resp = await format_response(previous_chunks)
+                self.last_response.update(formatted_resp)
+                yield previous_chunks if raw else formatted_resp
+
+            self.conversation.update_chat_history(
+                prompt,
+                previous_chunks,
+            )
+
+        async def for_non_stream():
+            async for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else await for_non_stream()
+
+    async def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict | AsyncGenerator:
+        """Generate response `str` asynchronously.
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str|AsyncGenerator: Response generated
+        """
+
+        async def for_stream():
+            async_ask = await self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            )
+            async for response in async_ask:
+                yield await self.get_message(response)
+
+        async def for_non_stream():
+            return await self.get_message(
+                await self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else await for_non_stream()
+
+    async def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
 class GPT4FREE(Provider):
     def __init__(
         self,
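To show how the new asynchronous class above is meant to be driven, a minimal usage sketch follows. The provider name and prompts are examples only, and actual behaviour depends on the installed g4f backends.

```python
import asyncio

from webscout.g4f import AsyncGPT4FREE


async def main() -> None:
    bot = AsyncGPT4FREE(provider="Koala", timeout=30)  # "Koala" is the default provider in this diff

    # Non-streaming: awaiting chat() resolves to the full reply string
    print(await bot.chat("What is the capital of France?"))

    # Streaming: awaiting chat(stream=True) returns an async generator whose
    # items are the accumulated reply text, so the last item is the full answer
    async for partial in await bot.chat("Tell me a short joke", stream=True):
        print(partial)


asyncio.run(main())
```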
webscout/version.py
CHANGED
@@ -1,2 +1,2 @@
-__version__ = "1.3.8"
+__version__ = "1.4.0"
 
webscout/webai.py
CHANGED
@@ -35,7 +35,6 @@ from colorama import Fore
 from colorama import init as init_colorama
 from dotenv import load_dotenv
 import g4f
-
 import webscout.AIutel
 
 init_colorama(autoreset=True)
@@ -472,6 +471,25 @@ class Main(cmd.Cmd):
                 history_offset=history_offset,
                 act=awesome_prompt,
             )
+        elif provider == "yepchat":
+            from webscout.AI import YEPCHAT
+
+            self.bot = YEPCHAT(
+                is_conversation=disable_conversation,
+                max_tokens=max_tokens,
+                temperature=temperature,
+                presence_penalty=top_p,
+                frequency_penalty=top_k,
+                top_p=top_p,
+                model=getOr(model, "Mixtral-8x7B-Instruct-v0.1"),
+                timeout=timeout,
+                intro=intro,
+                filepath=filepath,
+                update_file=update_file,
+                proxies=proxies,
+                history_offset=history_offset,
+                act=awesome_prompt,
+            )
         elif provider == "groq":
             assert auth, (
                 "GROQ's API-key is required. " "Use the flag `--key` or `-k`"
@@ -1135,7 +1153,7 @@ class EntryGroup:
     # @staticmethod
     @click.group()
     @click.version_option(
-        webscout.__version__, "-v", "--version", package_name="
+        webscout.__version__, "-v", "--version", package_name="webscout"
     )
     @click.help_option("-h", "--help")
     def webai_():
{webscout-1.3.8.dist-info → webscout-1.4.0.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 1.3.8
+Version: 1.4.0
 Summary: Search for anything using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models, can transcribe yt videos, have TTS support and now has webai(terminal gpt and open interpeter) support
 Author: OEvortex
 Author-email: helpingai5@gmail.com
@@ -47,6 +47,8 @@ Requires-Dist: tiktoken
 Requires-Dist: tldextract
 Requires-Dist: orjson
 Requires-Dist: PyYAML
+Requires-Dist: appdirs
+Requires-Dist: GoogleBard1 >=2.1.4
 Provides-Extra: dev
 Requires-Dist: ruff >=0.1.6 ; extra == 'dev'
 Requires-Dist: pytest >=7.4.2 ; extra == 'dev'
@@ -89,7 +91,7 @@ Search for anything using the Google, DuckDuckGo.com, yep.com, phind.com, you.co
 - [usage of webscout.AI](#usage-of-webscoutai)
 - [1. `PhindSearch` - Search using Phind.com](#1-phindsearch---search-using-phindcom)
 - [2. `YepChat` - Chat with mistral 8x7b powered by yepchat](#2-yepchat---chat-with-mistral-8x7b-powered-by-yepchat)
-- [3. `You.com` - search with you.com](#3-youcom---search-with-youcom)
+- [3. `You.com` - search with you.com -NOT WORKING](#3-youcom---search-with-youcom--not-working)
 - [4. `Gemini` - search with google gemini](#4-gemini---search-with-google-gemini)
 - [usage of image generator from Webscout.AI](#usage-of-image-generator-from-webscoutai)
 - [5. `Prodia` - make image using prodia](#5-prodia---make-image-using-prodia)
@@ -97,11 +99,9 @@ Search for anything using the Google, DuckDuckGo.com, yep.com, phind.com, you.co
 - [7. `PERPLEXITY` - Search With PERPLEXITY](#7-perplexity---search-with-perplexity)
 - [8. `OpenGPT` - chat With OPENGPT](#8-opengpt---chat-with-opengpt)
 - [9. `KOBOLDIA` -](#9-koboldia--)
-- [10. `
-- [11. `
-- [
-- [usage of special .LLM file from webscout (webscout.LLM)](#usage-of-special-llm-file-from-webscout-webscoutllm)
-- [`LLM`](#llm)
+- [10. `Reka` - chat with reka](#10-reka---chat-with-reka)
+- [11. `Cohere` - chat with cohere](#11-cohere---chat-with-cohere)
+- [`LLM` --not working](#llm---not-working)
 - [`LLM` with internet](#llm-with-internet)
 - [`Webai` - terminal gpt and a open interpeter](#webai---terminal-gpt-and-a-open-interpeter)
@@ -553,26 +553,22 @@ message = ph.get_message(response)
 print(message)
 ```
 ### 2. `YepChat` - Chat with mistral 8x7b powered by yepchat
-Thanks To Divyansh Shukla for This code
 ```python
-from webscout.AI import
+from webscout.AI import YEPCHAT
 
-
-
-
-
-
-
-
-
-
-print(processed_response)
+# Instantiate the YEPCHAT class with default parameters
+YEPCHAT = YEPCHAT()
+
+# Define a prompt to send to the AI
+prompt = "What is the capital of France?"
+
+# Use the 'cha' method to get a response from the AI
+r = YEPCHAT.chat(prompt)
+print(r)
 
-if __name__ == "__main__":
-    main()
 ```
 
-### 3. `You.com` - search with you.com
+### 3. `You.com` - search with you.com -NOT WORKING
 ```python
 from webscout.AI import youChat
 
@@ -598,15 +594,34 @@ while True:
 ### 4. `Gemini` - search with google gemini
 
 ```python
-
+import webscout
+from webscout.AI import GEMINI
 
-#
-
+# Replace with the path to your bard.google.com.cookies.json file
+COOKIE_FILE = "path/to/bard.google.com.cookies.json"
 
-#
-
+# Optional: Provide proxy details if needed
+PROXIES = {
+    "http": "http://proxy_server:port",
+    "https": "https://proxy_server:port",
+}
 
-#
+# Initialize GEMINI with cookie file and optional proxies
+gemini = GEMINI(cookie_file=COOKIE_FILE, proxy=PROXIES)
+
+# Ask a question and print the response
+response = gemini.chat("What is the meaning of life?")
+print(response)
+
+# Ask another question, this time streaming the response
+for chunk in gemini.chat("Tell me a story", stream=True):
+    print(chunk, end="")
+
+# Reset the conversation to start a new interaction
+gemini.reset()
+
+# Ask a question with the code optimizer
+response = gemini.chat("Write Python code to print 'Hello, world!'", optimizer="code")
 print(response)
 ```
 ## usage of image generator from Webscout.AI
@@ -638,17 +653,18 @@ ai = BLACKBOXAI(
     model=None # You can specify a model if needed
 )
 
-#
-
-
-
-
-
-
-
-#
-
+# Start an infinite loop for continuous interaction
+while True:
+    # Define a prompt to send to the AI
+    prompt = input("Enter your prompt: ")
+
+    # Check if the user wants to exit the loop
+    if prompt.lower() == "exit":
+        break
+
+    # Use the 'chat' method to send the prompt and receive a response
+    r = ai.chat(prompt)
+    print(r)
 ```
 ### 7. `PERPLEXITY` - Search With PERPLEXITY
 ```python
@@ -666,10 +682,12 @@ print(response)
 from webscout.AI import OPENGPT
 
 opengpt = OPENGPT(is_conversation=True, max_tokens=8000, timeout=30)
-
-
-
-print
+while True:
+    # Prompt the user for input
+    prompt = input("Enter your prompt: ")
+    # Send the prompt to the OPENGPT model and print the response
+    response_str = opengpt.chat(prompt)
+    print(response_str)
 ```
 ### 9. `KOBOLDIA` -
 ```python
@@ -688,19 +706,9 @@ response = koboldai.ask(prompt)
 message = koboldai.get_message(response)
 print(message)
 
-```
-### 10. `Sean` - chat With Sean
-```python
-from webscout.AI import Sean
-
-a = Sean(is_conversation=True, max_tokens=8000, timeout=30)
-# This example sends a simple greeting and prints the response
-prompt = "tell me about india"
-response_str = a.chat(prompt)
-print(response_str)
 ```
 
-###
+### 10. `Reka` - chat with reka
 ```python
 from webscout.AI import REKA
 
@@ -711,7 +719,7 @@ response_str = a.chat(prompt)
 print(response_str)
 ```
 
-###
+### 11. `Cohere` - chat with cohere
 ```python
 from webscout.AI import Cohere
 
@@ -721,9 +729,8 @@ prompt = "tell me about india"
 response_str = a.chat(prompt)
 print(response_str)
 ```
-## usage of special .LLM file from webscout (webscout.LLM)
 
-### `LLM`
+### `LLM` --not working
 ```python
 from webscout.LLM import LLM
 
@@ -844,7 +851,7 @@ def use_rawdog_with_webai(prompt):
     top_k=40,
     top_p=0.95,
     model="command-r-plus", # Replace with your desired model
-    auth=
+    auth=None, # Replace with your auth key/value (if needed)
     timeout=30,
     disable_conversation=True,
     filepath=None,