webscout 2.4.tar.gz → 2.6.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- {webscout-2.4 → webscout-2.6}/PKG-INFO +24 -2
- {webscout-2.4 → webscout-2.6}/README.md +21 -0
- {webscout-2.4 → webscout-2.6}/setup.py +2 -2
- {webscout-2.4 → webscout-2.6}/webscout/AIutel.py +20 -20
- {webscout-2.4 → webscout-2.6}/webscout/Local/_version.py +1 -1
- {webscout-2.4 → webscout-2.6}/webscout/Local/formats.py +0 -13
- {webscout-2.4 → webscout-2.6}/webscout/Local/thread.py +1 -1
- webscout-2.6/webscout/Provider/Poe.py +208 -0
- {webscout-2.4 → webscout-2.6}/webscout/Provider/__init__.py +2 -1
- {webscout-2.4 → webscout-2.6}/webscout/__init__.py +3 -29
- {webscout-2.4 → webscout-2.6}/webscout/webai.py +16 -0
- {webscout-2.4 → webscout-2.6}/webscout.egg-info/PKG-INFO +24 -2
- {webscout-2.4 → webscout-2.6}/webscout.egg-info/SOURCES.txt +1 -0
- {webscout-2.4 → webscout-2.6}/webscout.egg-info/requires.txt +2 -1
- {webscout-2.4 → webscout-2.6}/DeepWEBS/__init__.py +0 -0
- {webscout-2.4 → webscout-2.6}/DeepWEBS/documents/__init__.py +0 -0
- {webscout-2.4 → webscout-2.6}/DeepWEBS/documents/query_results_extractor.py +0 -0
- {webscout-2.4 → webscout-2.6}/DeepWEBS/documents/webpage_content_extractor.py +0 -0
- {webscout-2.4 → webscout-2.6}/DeepWEBS/networks/__init__.py +0 -0
- {webscout-2.4 → webscout-2.6}/DeepWEBS/networks/filepath_converter.py +0 -0
- {webscout-2.4 → webscout-2.6}/DeepWEBS/networks/google_searcher.py +0 -0
- {webscout-2.4 → webscout-2.6}/DeepWEBS/networks/network_configs.py +0 -0
- {webscout-2.4 → webscout-2.6}/DeepWEBS/networks/webpage_fetcher.py +0 -0
- {webscout-2.4 → webscout-2.6}/DeepWEBS/utilsdw/__init__.py +0 -0
- {webscout-2.4 → webscout-2.6}/DeepWEBS/utilsdw/enver.py +0 -0
- {webscout-2.4 → webscout-2.6}/DeepWEBS/utilsdw/logger.py +0 -0
- {webscout-2.4 → webscout-2.6}/LICENSE.md +0 -0
- {webscout-2.4 → webscout-2.6}/setup.cfg +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/AIauto.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/AIbase.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/DWEBS.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/LLM.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/Local/__init__.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/Local/model.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/Local/samplers.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/Local/utils.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/Provider/Berlin4h.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/Provider/Blackboxai.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/Provider/ChatGPTUK.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/Provider/Cohere.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/Provider/Gemini.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/Provider/Groq.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/Provider/Koboldai.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/Provider/Leo.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/Provider/Llama2.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/Provider/OpenGPT.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/Provider/Openai.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/Provider/Perplexity.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/Provider/Phind.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/Provider/Reka.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/Provider/ThinkAnyAI.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/Provider/Xjai.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/Provider/Yepchat.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/Provider/Youchat.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/__main__.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/async_providers.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/cli.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/exceptions.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/g4f.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/models.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/tempid.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/transcriber.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/utils.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/version.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/voice.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/webscout_search.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout/webscout_search_async.py +0 -0
- {webscout-2.4 → webscout-2.6}/webscout.egg-info/dependency_links.txt +0 -0
- {webscout-2.4 → webscout-2.6}/webscout.egg-info/entry_points.txt +0 -0
- {webscout-2.4 → webscout-2.6}/webscout.egg-info/top_level.txt +0 -0
{webscout-2.4 → webscout-2.6}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 2.4
+Version: 2.6
 Summary: Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs
 Author: OEvortex
 Author-email: helpingai5@gmail.com
@@ -50,7 +50,8 @@ Requires-Dist: appdirs
 Requires-Dist: GoogleBard1>=2.1.4
 Requires-Dist: tls_client
 Requires-Dist: clipman
-Requires-Dist: Helpingai-
+Requires-Dist: Helpingai-T2
+Requires-Dist: playsound
 Provides-Extra: dev
 Requires-Dist: ruff>=0.1.6; extra == "dev"
 Requires-Dist: pytest>=7.4.2; extra == "dev"
@@ -134,6 +135,7 @@ Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can
 - [13. `ThinkAny` - AI search engine](#13-thinkany---ai-search-engine)
 - [14. `chatgptuk` - Chat with gemini-pro](#14-chatgptuk---chat-with-gemini-pro)
 - [`LLM`](#llm)
+- [`Local-LLM` webscout can now run GGUF models](#local-llm-webscout-can-now-run-gguf-models)
 - [`LLM` with internet](#llm-with-internet)
 - [LLM with deepwebs](#llm-with-deepwebs)
 - [`Webai` - terminal gpt and a open interpeter](#webai---terminal-gpt-and-a-open-interpeter)
@@ -1224,6 +1226,26 @@ while True:
     # Print the response
     print("AI: ", response)
 ```
+### `Local-LLM` webscout can now run GGUF models
+```python
+from webscout.Local.utils import download_model
+from webscout.Local.model import Model
+from webscout.Local.thread import Thread
+from webscout.Local import formats
+# 1. Download the model
+repo_id = "microsoft/Phi-3-mini-4k-instruct-gguf"  # Replace with the desired Hugging Face repo
+filename = "Phi-3-mini-4k-instruct-q4.gguf"  # Replace with the correct filename
+model_path = download_model(repo_id, filename)
+
+# 2. Load the model
+model = Model(model_path, n_gpu_layers=4)
+
+# 3. Create a Thread for conversation
+thread = Thread(model, formats.phi3)
+
+# 4. Start interacting with the model
+thread.interact()
+```
 ### `LLM` with internet
 ```python
 from __future__ import annotations
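Editor's note on the example added above: `thread.interact()` drops into an interactive chat loop in the terminal. For scripted, non-interactive use, a sketch like the following should work, assuming `Thread` also exposes a `send()` method that returns the reply text (an assumption; the diff itself only demonstrates `interact()`):

```python
from webscout.Local.utils import download_model
from webscout.Local.model import Model
from webscout.Local.thread import Thread
from webscout.Local import formats

# Same model as the README example above
model_path = download_model(
    "microsoft/Phi-3-mini-4k-instruct-gguf",  # Hugging Face repo
    "Phi-3-mini-4k-instruct-q4.gguf",         # GGUF file inside that repo
)
model = Model(model_path, n_gpu_layers=4)  # n_gpu_layers offloads that many layers to the GPU
thread = Thread(model, formats.phi3)       # phi3 prompt format matches the Phi-3 model

# send() is assumed here, not shown in the diff; interact() is the documented path
print(thread.send("Summarize what a GGUF file is in one sentence."))
```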
{webscout-2.4 → webscout-2.6}/README.md

@@ -73,6 +73,7 @@ Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can
 - [13. `ThinkAny` - AI search engine](#13-thinkany---ai-search-engine)
 - [14. `chatgptuk` - Chat with gemini-pro](#14-chatgptuk---chat-with-gemini-pro)
 - [`LLM`](#llm)
+- [`Local-LLM` webscout can now run GGUF models](#local-llm-webscout-can-now-run-gguf-models)
 - [`LLM` with internet](#llm-with-internet)
 - [LLM with deepwebs](#llm-with-deepwebs)
 - [`Webai` - terminal gpt and a open interpeter](#webai---terminal-gpt-and-a-open-interpeter)
@@ -1163,6 +1164,26 @@ while True:
     # Print the response
     print("AI: ", response)
 ```
+### `Local-LLM` webscout can now run GGUF models
+```python
+from webscout.Local.utils import download_model
+from webscout.Local.model import Model
+from webscout.Local.thread import Thread
+from webscout.Local import formats
+# 1. Download the model
+repo_id = "microsoft/Phi-3-mini-4k-instruct-gguf"  # Replace with the desired Hugging Face repo
+filename = "Phi-3-mini-4k-instruct-q4.gguf"  # Replace with the correct filename
+model_path = download_model(repo_id, filename)
+
+# 2. Load the model
+model = Model(model_path, n_gpu_layers=4)
+
+# 3. Create a Thread for conversation
+thread = Thread(model, formats.phi3)
+
+# 4. Start interacting with the model
+thread.interact()
+```
 ### `LLM` with internet
 ```python
 from __future__ import annotations
{webscout-2.4 → webscout-2.6}/setup.py

@@ -5,7 +5,7 @@ with open("README.md", encoding="utf-8") as f:
 
 setup(
     name="webscout",
-    version="2.4",
+    version="2.6",
     description="Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs",
     long_description=README,
     long_description_content_type="text/markdown",
@@ -54,7 +54,7 @@ setup(
         "GoogleBard1>=2.1.4",
         "tls_client",
         "clipman",
-        "Helpingai-T2"
+        "Helpingai-T2",
         "playsound",
     ],
     entry_points={
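The comma added after "Helpingai-T2" is a real fix, not style: without it, Python's implicit concatenation of adjacent string literals silently merges the two entries into a single bogus requirement. A minimal, self-contained illustration:

```python
# Adjacent string literals are joined at parse time, so a missing comma
# in a list collapses two intended entries into one.
buggy = [
    "clipman",
    "Helpingai-T2"   # <- no trailing comma
    "playsound",     # ...so this joins the literal above
]
fixed = [
    "clipman",
    "Helpingai-T2",
    "playsound",
]
print(buggy)  # ['clipman', 'Helpingai-T2playsound']
print(fixed)  # ['clipman', 'Helpingai-T2', 'playsound']
```

This likely also explains why the old `Requires-Dist: Helpingai-` line in the 2.4 PKG-INFO above appears mangled: the metadata was generated from the merged requirement.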
{webscout-2.4 → webscout-2.6}/webscout/AIutel.py

@@ -26,27 +26,27 @@ default_path = appdir.user_cache_dir
 if not os.path.exists(default_path):
     os.makedirs(default_path)
 webai = [
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    "leo",
+    "openai",
+    "opengpt",
+    "koboldai",
+    "gemini",
+    "phind",
+    "blackboxai",
+    "g4fauto",
+    "perplexity",
+    "groq",
+    "reka",
+    "cohere",
+    "yepchat",
+    "you",
+    "xjai",
+    "thinkany",
+    "berlin4h",
+    "chatgptuk",
+    "auto",
+    "poe",
 ]
-
 gpt4free_providers = [
     provider.__name__ for provider in g4f.Provider.__providers__  # if provider.working
 ]
{webscout-2.4 → webscout-2.6}/webscout/Local/formats.py

@@ -45,19 +45,6 @@ alpaca: dict[str, Union[str, list]] = {
     "bot_postfix": "\n\n",
     "stops": ['###', 'Instruction:', '\n\n\n']
 }
-
-# https://docs.mistral.ai/models/
-# As a reference, here is the format used to tokenize instructions during fine-tuning:
-# ```
-# [START_SYMBOL_ID] +
-# tok("[INST]") + tok(USER_MESSAGE_1) + tok("[/INST]") +
-# tok(BOT_MESSAGE_1) + [END_SYMBOL_ID] +
-# …
-# tok("[INST]") + tok(USER_MESSAGE_N) + tok("[/INST]") +
-# tok(BOT_MESSAGE_N) + [END_SYMBOL_ID]
-# ```
-# In the pseudo-code above, note that the tokenize method should not add a BOS or EOS token automatically, but should add a prefix space.
-
 mistral_instruct: dict[str, Union[str, list]] = {
     "system_prefix": "",
     "system_content": "",
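For orientation (an editor's illustration, not part of the diff): each entry in formats.py is a plain dict describing how a conversation is wrapped for a given model family. A hypothetical custom format built only from the keys visible in this hunk; real formats in `webscout.Local.formats` may define additional fields:

```python
from typing import Union

# Hypothetical format dict: only the keys shown in the hunk above are used
# (system_prefix, system_content, bot_postfix, stops); any other fields the
# library expects are not represented here.
my_format: dict[str, Union[str, list]] = {
    "system_prefix": "### System:\n",
    "system_content": "You are a concise assistant.",
    "bot_postfix": "\n\n",
    "stops": ["###", "Instruction:"],
}
```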
webscout-2.6/webscout/Provider/Poe.py (new file)

@@ -0,0 +1,208 @@
+from poe_api_wrapper import PoeApi
+from poe_api_wrapper.api import BOTS_LIST
+from ..AIbase import Provider
+from ..AIutel import Conversation
+from ..AIutel import Optimizers
+from ..AIutel import AwesomePrompts
+from pathlib import Path
+from json import loads
+from json import dumps
+from loguru import logger
+import logging
+
+logger.remove()
+
+
+class POE(Provider):
+    def __init__(
+        self,
+        cookie: str,
+        model: str = "Assistant",
+        proxy: bool = False,
+        timeout: int = 30,
+        filepath: str = None,
+        update_file: str = True,
+        intro: str = None,
+        act: str = None,
+        init: bool = True,
+    ):
+        """Initializes POE
+
+        Args:
+            cookie (str): Path to `poe.com.cookies.json` file or 'p-b' cookie-value.
+            model (str, optional): Model name. Defaults to Assistant.
+            proxy (bool, optional): Flag for Httpx request proxy. Defaults to False.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            filepath (str, optional): Path to save the chat history. Defaults to None.
+            update_file (str, optional): Flag for controlling chat history updates. Defaults to True.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            init (bool, optional): Resend the intro prompt. Defaults to True.
+        """
+        assert isinstance(
+            cookie, str
+        ), f"Cookie must be of {str} datatype only not {type(cookie)}"
+        assert (
+            model in BOTS_LIST.keys()
+        ), f"model name '{model}' is not one of {', '.join(list(BOTS_LIST.keys()))}"
+        cookie_path = Path(cookie)
+
+        if cookie_path.exists() or any(["/" in cookie, ".json" in cookie]):
+            cookie = None
+            all_cookies = loads(cookie_path.read_text())
+            for entry in all_cookies:
+                if entry["name"] == "p-b":
+                    cookie = entry["value"]
+            assert (
+                cookie
+            ), f'Required cookie value cannot be retrieved from the path "{cookie_path.as_posix()}"'
+
+        if proxy:
+            import poe_api_wrapper.proxies as proxies
+
+            proxies.PROXY = True
+
+        self.bot = BOTS_LIST[model]
+        self.session = PoeApi(cookie)
+        self.last_response = {}
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            status=False, filepath=filepath, update_file=update_file
+        )
+        if init:
+            self.ask(self.conversation.intro)  # Init
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "id": "TWVzc2FnZToxMTU0MzgyNDQ1ODU=",
+            "messageId": 115438244585,
+            "creationTime": 1707777376544407,
+            "clientNonce": null,
+            "state": "complete",
+            "text": "Hello! How can I assist you today?",
+            "author": "capybara",
+            "contentType": "text_markdown",
+            "sourceType": "chat_input",
+            "attachmentTruncationState": "not_truncated",
+            "attachments": [],
+            "vote": null,
+            "suggestedReplies": [],
+            "hasCitations": false,
+            "__isNode": "Message",
+            "textLengthOnCancellation": null,
+            "chatCode": "21a2jn0yrq9phxiy478",
+            "chatId": 328236777,
+            "title": null,
+            "response": ""
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        def for_stream():
+            for response in self.session.send_message(self.bot, conversation_prompt):
+                if raw:
+                    yield dumps(response)
+                else:
+                    yield response
+
+            self.last_response.update(response)
+
+            self.conversation.update_chat_history(
+                prompt,
+                self.get_message(self.last_response),
+                force=True,
+            )
+
+        def for_non_stream():
+            # let's make use of stream
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
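A quick usage sketch for the new provider (an editor's illustration, not part of the diff; "AUTH_COOKIE" is a placeholder for your own poe.com 'p-b' cookie value or a path to poe.com.cookies.json):

```python
from webscout import POE

# init=False skips the intro round-trip that __init__ otherwise performs
bot = POE(cookie="AUTH_COOKIE", model="Assistant", init=False)

# Non-streaming: chat() returns the reply text
print(bot.chat("Hello there!"))

# Streaming: chat(stream=True) yields the message text of each partial response
for text in bot.chat("Write a haiku about the sea", stream=True):
    print(text)
```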
{webscout-2.4 → webscout-2.6}/webscout/Provider/__init__.py

@@ -27,7 +27,7 @@ from .Youchat import YouChat
 from .Gemini import GEMINI
 from .Berlin4h import Berlin4h
 from .ChatGPTUK import ChatGPTUK
-
+from .Poe import POE
 __all__ = [
     'ThinkAnyAI',
     'Xjai',
@@ -56,4 +56,5 @@ __all__ = [
     'GEMINI',
     'Berlin4h',
     'ChatGPTUK',
+    'POE'
 ]
{webscout-2.4 → webscout-2.6}/webscout/__init__.py

@@ -9,35 +9,7 @@ from .LLM import LLM
 # from .Local import *
 import g4f
 # Import provider classes for direct access
-from .Provider import (
-    ThinkAnyAI,
-    Xjai,
-    LLAMA2,
-    AsyncLLAMA2,
-    Cohere,
-    REKA,
-    GROQ,
-    AsyncGROQ,
-    OPENAI,
-    AsyncOPENAI,
-    LEO,
-    AsyncLEO,
-    KOBOLDAI,
-    AsyncKOBOLDAI,
-    OPENGPT,
-    AsyncOPENGPT,
-    PERPLEXITY,
-    BLACKBOXAI,
-    AsyncBLACKBOXAI,
-    PhindSearch,
-    AsyncPhindSearch,
-    YEPCHAT,
-    AsyncYEPCHAT,
-    YouChat,
-    GEMINI,
-    Berlin4h,
-    ChatGPTUK,
-)
+from .Provider import *
 
 __repo__ = "https://github.com/OE-LUCIFER/Webscout"
 
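A note on the simplification above (an editor's illustration, not part of the diff): `from .Provider import *` re-exports exactly the names listed in Provider/__init__.py's `__all__`, which is why adding 'POE' to that list is enough to surface it at the package top level. A self-contained sketch of the mechanism, using a throwaway in-memory module rather than webscout's real ones:

```python
import sys
from types import ModuleType

# Build a stand-in for webscout.Provider entirely in memory
mod = ModuleType("fake_provider")
exec(
    "__all__ = ['POE']\n"      # what `import *` will bind
    "class POE: ...\n"
    "class _Internal: ...\n",  # present in the module, but not exported
    mod.__dict__,
)
sys.modules["fake_provider"] = mod

from fake_provider import *  # noqa: F403 -- binds only the names in __all__

print(POE)                       # exported: it is listed in __all__
print("_Internal" in globals())  # False: filtered out by __all__
```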
@@ -61,6 +33,7 @@ webai = [
     "berlin4h",
     "chatgptuk",
     "auto",
+    "poe",
 ]
 
 gpt4free_providers = [
@@ -113,6 +86,7 @@ __all__ = [
     "GEMINI",
     "Berlin4h",
     "ChatGPTUK",
+    "POE"
 ]
 
 import logging
{webscout-2.4 → webscout-2.6}/webscout/webai.py

@@ -413,7 +413,23 @@ class Main(cmd.Cmd):
                 "No working g4f provider found. "
                 "Consider running 'webscout gpt4free test -y' first"
             )
+        elif provider == "poe":
+            assert auth, (
+                "Path to poe.com.cookies.json file or 'p-b' cookie-value is required. "
+                "Use the flag `--key` or `-k`"
+            )
+            from webscout import POE
 
+            self.bot = POE(
+                cookie=auth,
+                model=getOr(model, "Assistant"),
+                proxy=bool(proxies),
+                timeout=timeout,
+                filepath=filepath,
+                update_file=update_file,
+                intro=intro,
+                act=awesome_prompt,
+            )
         elif provider == "leo":
             from webscout import LEO
 
{webscout-2.4 → webscout-2.6}/webscout.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 2.4
+Version: 2.6
 Summary: Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs
 Author: OEvortex
 Author-email: helpingai5@gmail.com
@@ -50,7 +50,8 @@ Requires-Dist: appdirs
 Requires-Dist: GoogleBard1>=2.1.4
 Requires-Dist: tls_client
 Requires-Dist: clipman
-Requires-Dist: Helpingai-
+Requires-Dist: Helpingai-T2
+Requires-Dist: playsound
 Provides-Extra: dev
 Requires-Dist: ruff>=0.1.6; extra == "dev"
 Requires-Dist: pytest>=7.4.2; extra == "dev"
@@ -134,6 +135,7 @@ Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can
 - [13. `ThinkAny` - AI search engine](#13-thinkany---ai-search-engine)
 - [14. `chatgptuk` - Chat with gemini-pro](#14-chatgptuk---chat-with-gemini-pro)
 - [`LLM`](#llm)
+- [`Local-LLM` webscout can now run GGUF models](#local-llm-webscout-can-now-run-gguf-models)
 - [`LLM` with internet](#llm-with-internet)
 - [LLM with deepwebs](#llm-with-deepwebs)
 - [`Webai` - terminal gpt and a open interpeter](#webai---terminal-gpt-and-a-open-interpeter)
@@ -1224,6 +1226,26 @@ while True:
     # Print the response
     print("AI: ", response)
 ```
+### `Local-LLM` webscout can now run GGUF models
+```python
+from webscout.Local.utils import download_model
+from webscout.Local.model import Model
+from webscout.Local.thread import Thread
+from webscout.Local import formats
+# 1. Download the model
+repo_id = "microsoft/Phi-3-mini-4k-instruct-gguf"  # Replace with the desired Hugging Face repo
+filename = "Phi-3-mini-4k-instruct-q4.gguf"  # Replace with the correct filename
+model_path = download_model(repo_id, filename)
+
+# 2. Load the model
+model = Model(model_path, n_gpu_layers=4)
+
+# 3. Create a Thread for conversation
+thread = Thread(model, formats.phi3)
+
+# 4. Start interacting with the model
+thread.interact()
+```
 ### `LLM` with internet
 ```python
 from __future__ import annotations
All remaining files listed above are unchanged between 2.4 and 2.6.