webscout 2.6-py3-none-any.whl → 2.8-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/LLM.py +56 -1
- webscout/Local/_version.py +1 -1
- webscout/Local/formats.py +154 -88
- webscout/Local/model.py +4 -4
- webscout/Local/thread.py +166 -156
- webscout/Provider/BasedGPT.py +226 -0
- webscout/Provider/__init__.py +1 -0
- webscout/__init__.py +2 -2
- webscout/cli.py +39 -3
- webscout/version.py +1 -1
- webscout/webscout_search.py +1018 -40
- webscout/webscout_search_async.py +151 -839
- {webscout-2.6.dist-info → webscout-2.8.dist-info}/METADATA +37 -21
- {webscout-2.6.dist-info → webscout-2.8.dist-info}/RECORD +18 -17
- {webscout-2.6.dist-info → webscout-2.8.dist-info}/LICENSE.md +0 -0
- {webscout-2.6.dist-info → webscout-2.8.dist-info}/WHEEL +0 -0
- {webscout-2.6.dist-info → webscout-2.8.dist-info}/entry_points.txt +0 -0
- {webscout-2.6.dist-info → webscout-2.8.dist-info}/top_level.txt +0 -0
webscout/Provider/BasedGPT.py ADDED
@@ -0,0 +1,226 @@
+import time
+import uuid
+from selenium import webdriver
+from selenium.webdriver.chrome.options import Options
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.ui import WebDriverWait
+import click
+import requests
+from requests import get
+from uuid import uuid4
+from re import findall
+from requests.exceptions import RequestException
+from curl_cffi.requests import get, RequestsError
+import g4f
+from random import randint
+from PIL import Image
+import io
+import re
+import json
+import yaml
+from ..AIutel import Optimizers
+from ..AIutel import Conversation
+from ..AIutel import AwesomePrompts, sanitize_stream
+from ..AIbase import Provider, AsyncProvider
+from webscout import exceptions
+from typing import Any, AsyncGenerator, Dict
+import logging
+import httpx
+
+class BasedGPT(Provider):
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        system_prompt: str = "Be Helpful and Friendly",
+    ):
+        """Instantiates BasedGPT
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            system_prompt (str, optional): System prompt for BasedGPT. Defaults to "Be Helpful and Friendly".
+        """
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.chat_endpoint = "https://www.basedgpt.chat/api/chat"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.system_prompt = system_prompt
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(
+            {"Content-Type": "application/json"}
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "id": "chatcmpl-TaREJpBZsRVQFRFic1wIA7Q7XfnaD",
+            "object": "chat.completion",
+            "created": 1704623244,
+            "model": "gpt-3.5-turbo",
+            "usage": {
+                "prompt_tokens": 0,
+                "completion_tokens": 0,
+                "total_tokens": 0
+            },
+            "choices": [
+                {
+                    "message": {
+                        "role": "assistant",
+                        "content": "Hello! How can I assist you today?"
+                    },
+                    "finish_reason": "stop",
+                    "index": 0
+                }
+            ]
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt},
+            ],
+        }
+
+        def for_stream():
+            response = self.session.post(
+                self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
+            )
+            if not response.ok:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            message_load = ""
+            for value in response.iter_lines(
+                decode_unicode=True,
+                delimiter="",
+                chunk_size=self.stream_chunk_size,
+            ):
+                try:
+                    message_load += value
+                    yield value if raw else dict(text=message_load)
+                except json.decoder.JSONDecodeError:
+                    pass
+            self.last_response.update(dict(text=message_load))
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
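For orientation, here is a minimal usage sketch of the new provider, pieced together only from the methods visible in the diff above. The `from webscout.Provider.BasedGPT import BasedGPT` path is inferred from the file layout (and the one-line addition to `webscout/Provider/__init__.py`), so treat it as an assumption rather than documented API:

```python
# Hedged sketch: exercises only what the BasedGPT diff above defines.
from webscout.Provider.BasedGPT import BasedGPT  # import path inferred from file layout

bot = BasedGPT(timeout=30, system_prompt="Be Helpful and Friendly")

# Non-streaming: chat() returns the full response string.
print(bot.chat("Hello there"))

# Streaming: each yielded value is the cumulative text received so far,
# because ask() accumulates chunks into message_load before yielding.
for text in bot.chat("Tell me a joke", stream=True):
    print(text)
```

Note that the streaming generator yields the accumulated message each time, not deltas, so the final yield contains the whole reply.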
webscout/Provider/__init__.py
CHANGED
webscout/__init__.py
CHANGED
@@ -4,7 +4,7 @@ from .version import __version__
 from .DWEBS import DeepWEBS
 from .transcriber import transcriber
 from .voice import play_audio
-from .tempid import Client as TempMailClient, TemporaryPhoneNumber
+# from .tempid import Client as TempMailClient, TemporaryPhoneNumber
 from .LLM import LLM
 # from .Local import *
 import g4f
@@ -90,4 +90,4 @@ __all__ = [
 ]
 
 import logging
-logging.getLogger("webscout").addHandler(logging.NullHandler())
+logging.getLogger("webscout").addHandler(logging.NullHandler())
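One practical consequence of the commented-out `tempid` re-export above: code that imported the temp-mail client from the package root in 2.6 now raises ImportError. A hedged illustration (the fallback assumes `webscout/tempid.py` itself still ships in the wheel, which this diff does not confirm):

```python
# Importing the name from the package root fails after this change,
# since __init__.py no longer binds TempMailClient.
try:
    from webscout import TempMailClient  # worked in 2.6
except ImportError:
    # Assumed fallback: the tempid module itself may still be present.
    from webscout.tempid import Client as TempMailClient
```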
webscout/cli.py
CHANGED
@@ -4,12 +4,12 @@ import os
 from concurrent.futures import ThreadPoolExecutor, as_completed
 from datetime import datetime
 from urllib.parse import unquote
-
+from pathlib import Path
 import click
 from curl_cffi import requests
 
 from .webscout_search import WEBS
-from .utils import json_dumps
+from .utils import json_dumps, json_loads
 from .version import __version__
 
 logger = logging.getLogger(__name__)
@@ -129,7 +129,43 @@ def safe_entry_point():
 def version():
     print(__version__)
     return __version__
-
+@cli.command()
+@click.option("-s", "--save", is_flag=True, default=False, help="save the conversation in the json file")
+@click.option("-p", "--proxy", default=None, help="the proxy to send requests, example: socks5://localhost:9150")
+def chat(save, proxy):
+    """CLI function to perform an interactive AI chat using DuckDuckGo API."""
+    cache_file = "WEBS_chat_conversation.json"
+    models = ["gpt-3.5", "claude-3-haiku"]
+    client = WEBS(proxy=proxy)
+
+    print("DuckDuckGo AI chat. Available models:")
+    for idx, model in enumerate(models, start=1):
+        print(f"{idx}. {model}")
+    chosen_model_idx = input("Choose a model by entering its number[1]: ")
+    chosen_model_idx = 0 if not chosen_model_idx.strip() else int(chosen_model_idx) - 1
+    model = models[chosen_model_idx]
+    print(f"Using model: {model}")
+
+    if save and Path(cache_file).exists():
+        with open(cache_file) as f:
+            cache = json_loads(f.read())
+            client._chat_vqd = cache.get("vqd", None)
+            client._chat_messages = cache.get("messages", [])
+
+    while True:
+        user_input = input(f"{'-'*78}\nYou: ")
+        if not user_input.strip():
+            break
+
+        resp_answer = client.chat(keywords=user_input, model=model)
+        text = click.wrap_text(resp_answer, width=78, preserve_paragraphs=True)
+        click.secho(f"AI: {text}", bg="black", fg="green", overline=True)
+
+        cache = {"vqd": client._chat_vqd, "messages": client._chat_messages}
+        _save_json(cache_file, cache)
+
+        if "exit" in user_input.lower() or "quit" in user_input.lower():
+            break
 
 @cli.command()
 @click.option("-k", "--keywords", required=True, help="text search, keywords for query")
webscout/version.py
CHANGED
@@ -1,2 +1,2 @@
-__version__ = "2.
+__version__ = "2.7"