webscout 4.5-py3-none-any.whl → 4.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIutel.py +10 -0
- webscout/Extra/gguf.py +1 -1
- webscout/Provider/BasedGPT.py +38 -36
- webscout/Provider/Blackboxai.py +14 -10
- webscout/Provider/DARKAI.py +207 -0
- webscout/Provider/Deepseek.py +79 -133
- webscout/Provider/Llama3.py +173 -0
- webscout/Provider/PizzaGPT.py +178 -0
- webscout/Provider/RUBIKSAI.py +201 -0
- webscout/Provider/__init__.py +14 -3
- webscout/Provider/koala.py +239 -0
- webscout/Provider/meta.py +778 -0
- webscout/__init__.py +1 -0
- webscout/exceptions.py +6 -0
- webscout/version.py +1 -1
- webscout/webai.py +15 -1
- {webscout-4.5.dist-info → webscout-4.7.dist-info}/METADATA +40 -60
- {webscout-4.5.dist-info → webscout-4.7.dist-info}/RECORD +22 -16
- {webscout-4.5.dist-info → webscout-4.7.dist-info}/WHEEL +1 -1
- {webscout-4.5.dist-info → webscout-4.7.dist-info}/LICENSE.md +0 -0
- {webscout-4.5.dist-info → webscout-4.7.dist-info}/entry_points.txt +0 -0
- {webscout-4.5.dist-info → webscout-4.7.dist-info}/top_level.txt +0 -0
webscout/AIutel.py
CHANGED
@@ -54,6 +54,7 @@ webai = [
     "geminipro",
     "ollama",
     "andi",
+    "llama3"
 ]
 
 gpt4free_providers = [
@@ -533,6 +534,15 @@ LLM:
 ```python
 print("The essay is about...")
 ```
+
+3. User: Weather in qazigund
+
+LLM:
+```python
+from webscout import weather as w
+weather = w.get("Qazigund")
+w.print_weather(weather)
+```
 """
 
 
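The first hunk registers "llama3" as a selectable webai backend (presumably backed by the new webscout/Provider/Llama3.py in this release); the second teaches the webai system prompt a weather tool call. A minimal sketch of driving the new backend directly follows; the class name LLAMA3 and its zero-argument constructor are assumptions, since Llama3.py itself is not expanded in this diff:

```python
# Hypothetical usage of the new llama3 backend; the class name LLAMA3 and
# its import path are assumed, as this diff only shows the provider key.
from webscout.Provider.Llama3 import LLAMA3  # assumed module/class name

ai = LLAMA3()
print(ai.chat("Say hello"))  # standard webscout Provider interface
```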
webscout/Extra/gguf.py
CHANGED
@@ -153,7 +153,7 @@ huggingface-cli download "$MODEL_ID" --local-dir "./${MODEL_NAME}" --local-dir-u
 # Convert to fp16
 FP16="${MODEL_NAME}/${MODEL_NAME,,}.fp16.bin"
 echo "Converting the model to fp16..."
-python3 llama.cpp/
+python3 llama.cpp/convert_hf_to_gguf.py "$MODEL_NAME" --outtype f16 --outfile "$FP16"
 
 # Quantize the model
 echo "Quantizing the model..."
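The fix completes a previously truncated command: the script now calls llama.cpp's convert_hf_to_gguf.py to produce the fp16 file before quantization. A rough Python equivalent of that single shell step, with placeholder paths standing in for the script's variables:

```python
# Rough equivalent of the repaired shell line; model paths are placeholders.
import subprocess

model_name = "MyModel"  # stands in for "$MODEL_NAME"
fp16_path = f"{model_name}/{model_name.lower()}.fp16.bin"  # "$FP16"
subprocess.run(
    ["python3", "llama.cpp/convert_hf_to_gguf.py", model_name,
     "--outtype", "f16", "--outfile", fp16_path],
    check=True,  # fail loudly if the conversion errors out
)
```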
webscout/Provider/BasedGPT.py
CHANGED
@@ -40,7 +40,7 @@ class BasedGPT(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-
+        model: str = "gpt-3.5-turbo"
     ):
         """Instantiates BasedGPT
 
@@ -54,25 +54,40 @@ class BasedGPT(Provider):
             proxies (dict, optional): Http request proxies. Defaults to {}.
             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-
+            model (str, optional): Model to use for generating text. Defaults to "gpt-3.5-turbo".
         """
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.chat_endpoint = "https://www.basedgpt.chat/api/chat"
-        self.stream_chunk_size = 64
         self.timeout = timeout
         self.last_response = {}
-        self.
+        self.model = model
+        self.headers = {
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "content-length": "109",
+            "content-type": "application/json",
+            "dnt": "1",
+            "origin": "https://www.basedgpt.chat",
+            "priority": "u=1, i",
+            "referer": "https://www.basedgpt.chat/",
+            "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0"
+        }
 
         self.__available_optimizers = (
             method
             for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
-        self.session.headers.update(
-            {"Content-Type": "application/json"}
-        )
+        self.session.headers.update(self.headers)
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -106,25 +121,7 @@ class BasedGPT(Provider):
            dict : {}
         ```json
         {
-
-            "object": "chat.completion",
-            "created": 1704623244,
-            "model": "gpt-3.5-turbo",
-            "usage": {
-                "prompt_tokens": 0,
-                "completion_tokens": 0,
-                "total_tokens": 0
-            },
-            "choices": [
-                {
-                    "message": {
-                        "role": "assistant",
-                        "content": "Hello! How can I assist you today?"
-                    },
-                    "finish_reason": "stop",
-                    "index": 0
-                }
-            ]
+            "text" : "How may I assist you today?"
         }
         ```
         """
@@ -139,11 +136,14 @@ class BasedGPT(Provider):
                     f"Optimizer is not one of {self.__available_optimizers}"
                 )
 
+        self.session.headers.update(self.headers)
         payload = {
             "messages": [
-            {
-
-
+                {
+                    "role": "user",
+                    "content": conversation_prompt
+                }
+            ]
         }
 
         def for_stream():
@@ -151,22 +151,24 @@ class BasedGPT(Provider):
                 self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
             )
             if not response.ok:
-                raise
+                raise Exception(
                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                 )
 
-
+            streaming_text = ""
             for value in response.iter_lines(
                 decode_unicode=True,
-
-
+                chunk_size=64,
+                delimiter="\n",
             ):
                 try:
-
-
+                    if bool(value):
+                        streaming_text += value + ("\n" if stream else "")
+                        resp = dict(text=streaming_text)
+                        self.last_response.update(resp)
+                        yield value if raw else resp
                 except json.decoder.JSONDecodeError:
                     pass
-            self.last_response.update(dict(text=message_load))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
             )
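In sum, BasedGPT gains a model parameter, browser-like request headers, a plain {"text": ...} response shape in place of the old OpenAI-style envelope, and a streaming loop that accumulates lines into streaming_text. A minimal usage sketch, assuming the class keeps the standard webscout Provider interface used throughout this diff:

```python
# Minimal sketch; assumes BasedGPT exposes the standard webscout
# Provider interface (chat/ask/get_message) shown elsewhere in this diff.
from webscout.Provider.BasedGPT import BasedGPT

bot = BasedGPT(model="gpt-3.5-turbo")  # model is new in 4.7
print(bot.chat("Summarize GGUF in one sentence"))
```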
webscout/Provider/Blackboxai.py
CHANGED
@@ -22,14 +22,14 @@ import yaml
 from ..AIutel import Optimizers
 from ..AIutel import Conversation
 from ..AIutel import AwesomePrompts, sanitize_stream
-from ..AIbase import
+from ..AIbase import Provider, AsyncProvider
 from Helpingai_T2 import Perplexity
 from webscout import exceptions
 from typing import Any, AsyncGenerator, Dict
 import logging
 import httpx
 
-#------------------------------------------------------BLACKBOXAI--------------------------------------------------------
+#------------------------------------------------------BLACKBOXAI--------------------------------------------------------
 class BLACKBOXAI:
     def __init__(
         self,
@@ -234,13 +234,9 @@ class BLACKBOXAI:
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
-
-
-
-blackbox_ai = BLACKBOXAI() # Initialize a BLACKBOXAI instance
-response = blackbox_ai.ask(prompt) # Perform a chat with the given prompt
-processed_response = blackbox_ai.get_message(response) # Process the response
-print(processed_response)
+
+
+
 class AsyncBLACKBOXAI(AsyncProvider):
     def __init__(
         self,
@@ -437,4 +433,12 @@ class AsyncBLACKBOXAI(AsyncProvider):
         str: Message extracted
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
+        return response["text"]
+
+# Function to clean the response text
+def clean_response(response_text: str) -> str:
+    # Remove web search results
+    response_text = re.sub(r'\$@\$v=undefined-rv1\$@\$Sources:.*?\$~~~', '', response_text, flags=re.DOTALL)
+    # Remove any remaining special characters or markers
+    response_text = re.sub(r'\$~~~', '', response_text)
+    return response_text
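The new module-level clean_response helper strips Blackbox's embedded web-search markers from a reply. An illustration of its effect on an invented sample string (note the helper depends on re, which does not appear in the import hunk above):

```python
import re

# Invented sample carrying the markers that clean_response targets.
sample = "Paris is the capital of France.$@$v=undefined-rv1$@$Sources: example.com$~~~"
cleaned = re.sub(r'\$@\$v=undefined-rv1\$@\$Sources:.*?\$~~~', '', sample, flags=re.DOTALL)
cleaned = re.sub(r'\$~~~', '', cleaned)
print(cleaned)  # -> "Paris is the capital of France."
```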
webscout/Provider/DARKAI.py
ADDED
@@ -0,0 +1,207 @@
+import time
+import json
+from typing import Any, Dict, Optional
+from ..AIutel import Optimizers
+from ..AIutel import Conversation
+from ..AIutel import AwesomePrompts, sanitize_stream
+from ..AIbase import Provider
+from webscout import exceptions
+import requests
+class DARKAI(Provider):
+    """
+    A class to interact with the DarkAI API.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "gpt-4o", #llama-3-70b, llama-3-405b, gpt-3.5-turbo, gpt-4o
+    ) -> None:
+        """
+        Initializes the DARKAI API with given parameters.
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion.
+                Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts.
+                Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            model (str, optional): AI model to use. Defaults to "gpt-4o". #llama-3-70b, llama-3-405b, gpt-3.5-turbo, gpt-4o
+        """
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://darkai.foundation/chat"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.headers = {
+            "accept": "text/event-stream",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "content-type": "application/json",
+            "dnt": "1",
+            "origin": "https://www.aiuncensored.info",
+            "referer": "https://www.aiuncensored.info/",
+            "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "cross-site",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0"
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any]:
+        """
+        Sends a prompt to the DarkAI API and returns the response.
+
+        Args:
+            prompt: The text prompt to generate text from.
+            stream (bool, optional): Whether to stream the response. Defaults to False.
+            raw (bool, optional): Whether to return the raw response. Defaults to False.
+            optimizer (str, optional): The name of the optimizer to use. Defaults to None.
+            conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
+
+        Returns:
+            The response from the API.
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "query": conversation_prompt,
+            "model": self.model
+        }
+
+        def for_stream():
+            response = self.session.post(
+                self.api_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
+            )
+
+            if not response.ok:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason})"
+                )
+
+            streaming_response = ""
+            for line in response.iter_lines():
+                if line:
+                    decoded_line = line.decode('utf-8')
+                    if decoded_line.startswith("data:"):
+                        data = decoded_line[len("data:"):].strip()
+                        if data:
+                            try:
+                                event = json.loads(data)
+                                if event.get("event") == "final-response":
+                                    message = event['data'].get('message', '')
+                                    streaming_response += message
+                                    yield message if raw else dict(text=streaming_response)
+                            except json.decoder.JSONDecodeError:
+                                continue
+            self.last_response.update(dict(text=streaming_response))
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]