webscout 2.1-py3-none-any.whl → 2.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIutel.py +2 -1
- webscout/Provider/Berlin4h.py +211 -0
- webscout/Provider/ChatGPTUK.py +214 -0
- webscout/Provider/__init__.py +4 -2
- webscout/__init__.py +6 -3
- webscout/webai.py +28 -14
- {webscout-2.1.dist-info → webscout-2.2.dist-info}/METADATA +43 -11
- {webscout-2.1.dist-info → webscout-2.2.dist-info}/RECORD +12 -10
- {webscout-2.1.dist-info → webscout-2.2.dist-info}/LICENSE.md +0 -0
- {webscout-2.1.dist-info → webscout-2.2.dist-info}/WHEEL +0 -0
- {webscout-2.1.dist-info → webscout-2.2.dist-info}/entry_points.txt +0 -0
- {webscout-2.1.dist-info → webscout-2.2.dist-info}/top_level.txt +0 -0
webscout/AIutel.py
CHANGED
webscout/Provider/Berlin4h.py
ADDED
@@ -0,0 +1,211 @@
import requests
import json
import uuid
from typing import Any, Dict, Optional
from ..AIutel import Optimizers
from ..AIutel import Conversation
from ..AIutel import AwesomePrompts, sanitize_stream
from ..AIbase import Provider, AsyncProvider
from webscout import exceptions

class Berlin4h(Provider):
    """
    A class to interact with the Berlin4h AI API.
    """

    def __init__(
        self,
        api_token: str = "3bf369cd84339603f8a5361e964f9ebe",
        api_endpoint: str = "https://ai.berlin4h.top/api/chat/completions",
        model: str = "gpt-3.5-turbo",
        temperature: float = 0.9,
        presence_penalty: float = 0,
        frequency_penalty: float = 0,
        max_tokens: int = 4000,
        is_conversation: bool = True,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
    ) -> None:
        """
        Initializes the Berlin4h API with given parameters.

        Args:
            api_token (str): The API token for authentication.
            api_endpoint (str): The API endpoint to use for requests.
            model (str): The AI model to use for text generation.
            temperature (float): The temperature parameter for the model.
            presence_penalty (float): The presence penalty parameter for the model.
            frequency_penalty (float): The frequency penalty parameter for the model.
            max_tokens (int): The maximum number of tokens to generate.
            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
            timeout (int, optional): Http request timeout. Defaults to 30.
            intro (str, optional): Conversation introductory prompt. Defaults to None.
            filepath (str, optional): Path to file containing conversation history. Defaults to None.
            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
            proxies (dict, optional): Http request proxies. Defaults to {}.
            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
        """
        self.api_token = api_token
        self.api_endpoint = api_endpoint
        self.model = model
        self.temperature = temperature
        self.presence_penalty = presence_penalty
        self.frequency_penalty = frequency_penalty
        self.max_tokens = max_tokens
        self.parent_message_id: Optional[str] = None
        self.session = requests.Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.stream_chunk_size = 1
        self.timeout = timeout
        self.last_response = {}
        self.headers = {"Content-Type": "application/json", "Token": self.api_token}
        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        self.session.headers.update(self.headers)
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        self.session.proxies = proxies

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Dict[str, Any]:
        """
        Sends a prompt to the Berlin4h AI API and returns the response.

        Args:
            prompt: The text prompt to generate text from.
            stream (bool, optional): Whether to stream the response. Defaults to False.
            raw (bool, optional): Whether to return the raw response. Defaults to False.
            optimizer (str, optional): The name of the optimizer to use. Defaults to None.
            conversationally (bool, optional): Whether to chat conversationally. Defaults to False.

        Returns:
            The response from the API.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        payload: Dict[str, any] = {
            "prompt": conversation_prompt,
            "parentMessageId": self.parent_message_id or str(uuid.uuid4()),
            "options": {
                "model": self.model,
                "temperature": self.temperature,
                "presence_penalty": self.presence_penalty,
                "frequency_penalty": self.frequency_penalty,
                "max_tokens": self.max_tokens,
            },
        }

        def for_stream():
            response = self.session.post(
                self.api_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
            )

            if not response.ok:
                raise exceptions.FailedToGenerateResponseError(
                    f"Failed to generate response - ({response.status_code}, {response.reason})"
                )

            streaming_response = ""
            # Collect the entire line before processing
            for line in response.iter_lines(decode_unicode=True):
                if line:
                    try:
                        json_data = json.loads(line)
                        content = json_data['content']
                        if ">" in content: break
                        streaming_response += content
                        yield content if raw else dict(text=streaming_response)  # Yield accumulated response
                    except:
                        continue
            self.last_response.update(dict(text=streaming_response))
            self.conversation.update_chat_history(
                prompt, self.get_message(self.last_response)
            )

        def for_non_stream():
            for _ in for_stream():
                pass
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str:
        """Generate response `str`
        Args:
            prompt (str): Prompt to be send.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            str: Response generated
        """

        def for_stream():
            for response in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt,
                    False,
                    optimizer=optimizer,
                    conversationally=conversationally,
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Retrieves message only from response

        Args:
            response (dict): Response generated by `self.ask`

        Returns:
            str: Message extracted
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]
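A minimal usage sketch for the new provider, assuming webscout 2.2 is installed and the hard-coded Berlin4h endpoint is still reachable (this snippet is illustrative and not part of the release):

```python
from webscout import Berlin4h

bot = Berlin4h(timeout=60)  # endpoint, token, and model come from the defaults above

# chat(stream=True) yields the accumulated reply after each parsed chunk,
# so the last value seen is the complete response.
reply = ""
for snapshot in bot.chat("Write a haiku about the sea", stream=True):
    reply = snapshot
print(reply)
```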
webscout/Provider/ChatGPTUK.py
ADDED
@@ -0,0 +1,214 @@
import requests
from typing import Any, AsyncGenerator, Dict, Optional
import json
import re

from ..AIutel import Optimizers
from ..AIutel import Conversation
from ..AIutel import AwesomePrompts, sanitize_stream
from ..AIbase import Provider, AsyncProvider
from webscout import exceptions


class ChatGPTUK(Provider):
    """
    A class to interact with the ChatGPT UK API.
    """

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 600,
        temperature: float = 0.9,
        presence_penalty: float = 0,
        frequency_penalty: float = 0,
        top_p: float = 1,
        model: str = "google-gemini-pro",
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
    ) -> None:
        """
        Initializes the ChatGPTUK API with given parameters.

        Args:
            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
            temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.9.
            presence_penalty (float, optional): Chances of topic being repeated. Defaults to 0.
            frequency_penalty (float, optional): Chances of word being repeated. Defaults to 0.
            top_p (float, optional): Sampling threshold during inference time. Defaults to 1.
            model (str, optional): LLM model name. Defaults to "google-gemini-pro".
            timeout (int, optional): Http request timeout. Defaults to 30.
            intro (str, optional): Conversation introductory prompt. Defaults to None.
            filepath (str, optional): Path to file containing conversation history. Defaults to None.
            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
            proxies (dict, optional): Http request proxies. Defaults to {}.
            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
        """
        self.session = requests.Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://free.chatgpt.org.uk/api/openai/v1/chat/completions"
        self.stream_chunk_size = 64
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.temperature = temperature
        self.presence_penalty = presence_penalty
        self.frequency_penalty = frequency_penalty
        self.top_p = top_p
        self.headers = {"Content-Type": "application/json"}

        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        self.session.headers.update(self.headers)
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        self.session.proxies = proxies

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> dict:
        """Chat with AI

        Args:
            prompt (str): Prompt to be send.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            raw (bool, optional): Stream back raw response as received. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            dict : {}
            ```json
            {
                "text" : "How may I assist you today?"
            }
            ```
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        self.session.headers.update(self.headers)
        payload = {
            "messages": [
                {"role": "system", "content": "Keep your responses long and detailed"},
                {"role": "user", "content": conversation_prompt}
            ],
            "stream": True,
            "model": self.model,
            "temperature": self.temperature,
            "presence_penalty": self.presence_penalty,
            "frequency_penalty": self.frequency_penalty,
            "top_p": self.top_p,
            "max_tokens": self.max_tokens_to_sample
        }

        def for_stream():
            response = self.session.post(
                self.api_endpoint, json=payload, stream=True, timeout=self.timeout
            )
            if not response.ok:
                raise exceptions.FailedToGenerateResponseError(
                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                )

            streaming_response = ""
            for line in response.iter_lines(decode_unicode=True, chunk_size=1):
                if line:
                    modified_line = re.sub("data:", "", line)
                    try:
                        json_data = json.loads(modified_line)
                        content = json_data['choices'][0]['delta']['content']
                        streaming_response += content
                        yield content if raw else dict(text=streaming_response)
                    except:
                        continue
            self.last_response.update(dict(text=streaming_response))
            self.conversation.update_chat_history(
                prompt, self.get_message(self.last_response)
            )

        def for_non_stream():
            for _ in for_stream():
                pass
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str:
        """Generate response `str`
        Args:
            prompt (str): Prompt to be send.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            str: Response generated
        """

        def for_stream():
            for response in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt,
                    False,
                    optimizer=optimizer,
                    conversationally=conversationally,
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Retrieves message only from response

        Args:
            response (dict): Response generated by `self.ask`

        Returns:
            str: Message extracted
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]
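ChatGPTUK's streaming loop above parses OpenAI-style server-sent events: each line has its `data:` prefix stripped, is JSON-decoded, and contributes `choices[0].delta.content`. A self-contained sketch of that parsing step, using hypothetical sample lines rather than a live response:

```python
import json
import re

# Hypothetical SSE lines in the shape an OpenAI-compatible endpoint emits.
sample_lines = [
    'data: {"choices": [{"delta": {"content": "Hel"}}]}',
    'data: {"choices": [{"delta": {"content": "lo!"}}]}',
    "data: [DONE]",
]

accumulated = ""
for line in sample_lines:
    payload = re.sub("data:", "", line)  # same prefix-stripping as the provider
    try:
        delta = json.loads(payload)["choices"][0]["delta"]["content"]
    except (json.JSONDecodeError, KeyError, IndexError):
        continue  # non-JSON lines such as "data: [DONE]" are skipped
    accumulated += delta

print(accumulated)  # -> Hello!
```

The provider itself uses a bare `except` for this skip, which also swallows unrelated errors; the narrower handler here is a deliberate illustration, not release code.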
webscout/Provider/__init__.py
CHANGED
@@ -25,7 +25,8 @@ from .Yepchat import YEPCHAT
 from .Yepchat import AsyncYEPCHAT
 from .Youchat import YouChat
 from .Gemini import GEMINI
-
+from .Berlin4h import Berlin4h
+from .ChatGPTUK import ChatGPTUK
 
 __all__ = [
     'ThinkAnyAI',
@@ -53,5 +54,6 @@ __all__ = [
     'AsyncYEPCHAT',
     'YouChat',
     'GEMINI',
-
+    'Berlin4h',
+    'ChatGPTUK',
 ]
webscout/__init__.py
CHANGED
@@ -38,7 +38,8 @@ from .Provider import (
     AsyncYEPCHAT,
     YouChat,
     GEMINI,
-
+    Berlin4h,
+    ChatGPTUK,
 )
 
 __repo__ = "https://github.com/OE-LUCIFER/Webscout"
@@ -60,7 +61,8 @@ webai = [
     "you",
     "xjai",
     "thinkany",
-
+    "berlin4h",
+    "chatgptuk",
     "auto",
 ]
 
@@ -106,7 +108,8 @@ __all__ = [
     "AsyncYEPCHAT",
     "YouChat",
     "GEMINI",
-
+    "Berlin4h",
+    "ChatGPTUK",
 ]
 
 # Set up basic logger
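With both re-export layers updated, the new classes resolve from the package root. A quick import check, assuming webscout 2.2 is installed:

```python
from webscout import Berlin4h, ChatGPTUK

# Expected origins, given the re-export chain shown in the diffs above.
print(Berlin4h.__module__)   # webscout.Provider.Berlin4h
print(ChatGPTUK.__module__)  # webscout.Provider.ChatGPTUK
```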
webscout/webai.py
CHANGED
@@ -500,20 +500,34 @@ class Main(cmd.Cmd):
                 history_offset=history_offset,
                 act=awesome_prompt,
             )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        elif provider == "berlin4h":
+            from webscout import Berlin4h
+
+            self.bot = Berlin4h(
+                is_conversation=disable_conversation,
+                max_tokens=max_tokens,
+                timeout=timeout,
+                intro=intro,
+                filepath=filepath,
+                update_file=update_file,
+                proxies=proxies,
+                history_offset=history_offset,
+                act=awesome_prompt,
+            )
+        elif provider == "chatgptuk":
+            from webscout import ChatGPTUK
+
+            self.bot = ChatGPTUK(
+                is_conversation=disable_conversation,
+                max_tokens=max_tokens,
+                timeout=timeout,
+                intro=intro,
+                filepath=filepath,
+                update_file=update_file,
+                proxies=proxies,
+                history_offset=history_offset,
+                act=awesome_prompt,
+            )
         elif provider == "yepchat":
             from webscout import YEPCHAT
 
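The webai change extends a long if/elif chain keyed on the provider string, with both new branches passing an identical set of keyword arguments. The same wiring can be expressed as a lookup table; this is an illustrative sketch (`PROVIDERS` and `make_bot` are hypothetical names, not part of the release):

```python
from webscout import Berlin4h, ChatGPTUK

# Hypothetical registry mapping webai provider names to provider classes.
PROVIDERS = {
    "berlin4h": Berlin4h,
    "chatgptuk": ChatGPTUK,
}

def make_bot(provider: str, **common_kwargs):
    """Instantiate the selected provider with the shared keyword arguments."""
    try:
        return PROVIDERS[provider](**common_kwargs)
    except KeyError:
        raise ValueError(f"Unknown provider: {provider!r}") from None
```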
{webscout-2.1.dist-info → webscout-2.2.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 2.1
+Version: 2.2
 Summary: Search for anything using the Google, DuckDuckGo, phind.com. Also containes AI models, can transcribe yt videos, temporary email and phone number generation, have TTS support and webai(terminal gpt and open interpeter)
 Author: OEvortex
 Author-email: helpingai5@gmail.com
@@ -120,15 +120,16 @@ Search for anything using the Google, DuckDuckGo, phind.com. Also containes AI m
 - [2. `YepChat` - Chat with mistral 8x7b powered by yepchat](#2-yepchat---chat-with-mistral-8x7b-powered-by-yepchat)
 - [3. `You.com` - search/chat with you.com](#3-youcom---searchchat-with-youcom)
 - [4. `Gemini` - search with google gemini](#4-gemini---search-with-google-gemini)
-- [5. `
+- [5. `Berlin4h` - chat with Berlin4h](#5-berlin4h---chat-with-berlin4h)
 - [6. `BlackBox` - Search/chat With BlackBox](#6-blackbox---searchchat-with-blackbox)
 - [7. `PERPLEXITY` - Search With PERPLEXITY](#7-perplexity---search-with-perplexity)
 - [8. `OpenGPT` - chat With OPENGPT](#8-opengpt---chat-with-opengpt)
-- [9. `
+- [9. `KOBOLDAI` -](#9-koboldai--)
 - [10. `Reka` - chat with reka](#10-reka---chat-with-reka)
 - [11. `Cohere` - chat with cohere](#11-cohere---chat-with-cohere)
 - [12. `Xjai` - chat with free gpt 3.5](#12-xjai---chat-with-free-gpt-35)
 - [13. `ThinkAny` - AI search engine](#13-thinkany---ai-search-engine)
+- [14. `chatgptuk` - Chat with gemini-pro](#14-chatgptuk---chat-with-gemini-pro)
 - [`LLM`](#llm)
 - [`LLM` with internet](#llm-with-internet)
 - [LLM with deepwebs](#llm-with-deepwebs)
@@ -1009,15 +1010,26 @@ gemini = GEMINI(cookie_file=COOKIE_FILE, proxy=PROXIES)
 response = gemini.chat("What is the meaning of life?")
 print(response)
 ```
-### 5. `
+### 5. `Berlin4h` - chat with Berlin4h
 ```python
-from webscout import
-
-
-
+from webscout import Berlin4h
+# Create an instance of the Berlin4h class
+ai = Berlin4h(
+    is_conversation=True,
+    max_tokens=800,
+    timeout=30,
+    intro=None,
+    filepath=None,
+    update_file=True,
+    proxies={},
+    history_offset=10250,
+    act=None,
+)
 
-#
-
+# Example usage:
+prompt = "Explain the concept of recursion in simple terms."
+response = ai.chat(prompt)
+print(response)
 ```
 ### 6. `BlackBox` - Search/chat With BlackBox
 ```python
@@ -1073,7 +1085,7 @@ while True:
     response_str = opengpt.chat(prompt)
     print(response_str)
 ```
-### 9. `
+### 9. `KOBOLDAI` -
 ```python
 from webscout import KOBOLDAI
 
@@ -1162,7 +1174,27 @@ response = ai.ask(prompt)
 message = ai.get_message(response)
 print(message)
 ```
+### 14. `chatgptuk` - Chat with gemini-pro
+```python
+from webscout import ChatGPTUK
+# Create an instance of the ChatGPTUK class
+ai = ChatGPTUK(
+    is_conversation=True,
+    max_tokens=800,
+    timeout=30,
+    intro=None,
+    filepath=None,
+    update_file=True,
+    proxies={},
+    history_offset=10250,
+    act=None,
+)
 
+# Example usage:
+prompt = "Explain the concept of recursion in simple terms."
+response = ai.chat(prompt)
+print(response)
+```
 ### `LLM`
 ```python
 from webscout.LLM import LLM
{webscout-2.1.dist-info → webscout-2.2.dist-info}/RECORD
CHANGED
@@ -12,10 +12,10 @@ DeepWEBS/utilsdw/enver.py,sha256=vpI7s4_o_VL9govSryOv-z1zYK3pTEW3-H9QNN8JYtc,247
 DeepWEBS/utilsdw/logger.py,sha256=Z0nFUcEGyU8r28yKiIyvEtO26xxpmJgbvNToTfwZecc,8174
 webscout/AIauto.py,sha256=NlIx-Nfuq-xJ3uZUOUJUXtZ2tzwcbx1ViIlnVK2aCrw,17297
 webscout/AIbase.py,sha256=GoHbN8r0gq2saYRZv6LA-Fr9Jlcjv80STKFXUq2ZeGU,4710
-webscout/AIutel.py,sha256=
+webscout/AIutel.py,sha256=LXYgQOEk21c7MpNJlSl2RbLvR8HDJhTSwJiVVgqf0Iw,33266
 webscout/DWEBS.py,sha256=QT-7-dUgWhQ_H7EVZD53AVyXxyskoPMKCkFIpzkN56Q,7332
 webscout/LLM.py,sha256=CiDz0okZNEoXuxMwadZnwRGSLpqk2zg0vzvXSxQZjcE,1910
-webscout/__init__.py,sha256=
+webscout/__init__.py,sha256=jJ6690a2XMglvK9LeGpiWJEGZuhWivUx3wHAQ0ov9dA,2338
 webscout/__main__.py,sha256=ZtTRgsRjUi2JOvYFLF1ZCh55Sdoz94I-BS-TlJC7WDU,126
 webscout/async_providers.py,sha256=KGWKAhdEh4nMntLtyCaO0p827Tcg__gBLT_MLRO4l5o,711
 webscout/cli.py,sha256=F888fdrFUQgczMBN4yMOSf6Nh-IbvkqpPhDsbnA2FtQ,17059
@@ -27,10 +27,12 @@ webscout/transcriber.py,sha256=EddvTSq7dPJ42V3pQVnGuEiYQ7WjJ9uyeR9kMSxN7uY,20622
 webscout/utils.py,sha256=CxeXvp0rWIulUrEaPZMaNfg_tSuQLRSV8uuHA2chyKE,2603
 webscout/version.py,sha256=fFBXyqC1-7JPoHD1UTEFlaKrCKVdGOVN0qqyZ63dkXw,23
 webscout/voice.py,sha256=0QjXTHAQmCK07IDZXRc7JXem47cnPJH7u3X0sVP1-UQ,967
-webscout/webai.py,sha256=
+webscout/webai.py,sha256=cRzJNVljryCxV4kEtmii0_CEBxSs71Ft3sdPdcqGmBg,84668
 webscout/webscout_search.py,sha256=TvbrRYVMXbFGgEh0CoFHNYVY3iQ8SmejxEmv8Csu4IA,3159
 webscout/webscout_search_async.py,sha256=4_L_t_I9WlvpPEI3FI0K3v6Aayr0pNvD3chYOp7JR8o,42902
+webscout/Provider/Berlin4h.py,sha256=-O6BRkLusUEdYXcyQ09iY86dFl9WoiA4mlmZ_DLZbos,8342
 webscout/Provider/Blackboxai.py,sha256=8B5wT_eb86RVZ5uOqwvgVC5QATl0uEMCli0n4SDwt1M,16743
+webscout/Provider/ChatGPTUK.py,sha256=ozpWnuOlC_7jeDcTuUukFPcPkIksx-Bgq_6Rrf0Bwak,8357
 webscout/Provider/Cohere.py,sha256=6lxu0luoIaTTI0uEmJwY5hsiIIq0meZf35jaGcCvcSA,8489
 webscout/Provider/Gemini.py,sha256=UmFcU1MLNK7nwIRKS1pyA39JHeVLTZII0444LW-KmSM,8235
 webscout/Provider/Groq.py,sha256=vfaSEbzGY92YiADbeUufmy2OGshAoO0WKmmJ75c5uZY,20583
@@ -46,10 +48,10 @@ webscout/Provider/ThinkAnyAI.py,sha256=_qFjj0djxxrranyEY33w14oizyRjzlVwMv_hzvVtw
 webscout/Provider/Xjai.py,sha256=gI9FqEodS-jHfFM_CsDPmTb_wL5NU2q__2fg9hqVoEc,8809
 webscout/Provider/Yepchat.py,sha256=E0tv3Zfoqs1Sw8Pe-6_5d--_1LESm8mjw536DWclJk8,19398
 webscout/Provider/Youchat.py,sha256=JAZYwcj0Kl1UUgqN0rD3TKaReA1G-cmIlW_4mog1j_c,7756
-webscout/Provider/__init__.py,sha256=
-webscout-2.
-webscout-2.
-webscout-2.
-webscout-2.
-webscout-2.
-webscout-2.
+webscout/Provider/__init__.py,sha256=BPYm-ZOkZOXXn3bx_2UHIV6aZS47d9Y01JDq-EiPXhQ,1318
+webscout-2.2.dist-info/LICENSE.md,sha256=mRVwJuT4SXC5O93BFdsfWBjlXjGn2Np90Zm5SocUzM0,3150
+webscout-2.2.dist-info/METADATA,sha256=hZMwgDbYv2rbOEB3JTAAH7TOpVWou5e9-zsWxEnflS4,46482
+webscout-2.2.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+webscout-2.2.dist-info/entry_points.txt,sha256=8-93eRslYrzTHs5E-6yFRJrve00C9q-SkXJD113jzRY,197
+webscout-2.2.dist-info/top_level.txt,sha256=OD5YKy6Y3hldL7SmuxsiEDxAG4LgdSSWwzYk22MF9fk,18
+webscout-2.2.dist-info/RECORD,,
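Each RECORD line has the form `path,sha256=<digest>,<size>`, where the digest is the unpadded urlsafe-base64 SHA-256 of the file's bytes; installers use these entries to verify wheel contents. A sketch of computing one entry for an arbitrary local file:

```python
import base64
import hashlib
from pathlib import Path

def record_entry(path: str) -> str:
    """Build a RECORD-style line: path,sha256=<urlsafe-b64 digest>,<byte size>."""
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode()},{len(data)}"

# Example: print(record_entry("webscout/version.py"))
```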
{webscout-2.1.dist-info → webscout-2.2.dist-info}/LICENSE.md
File without changes
{webscout-2.1.dist-info → webscout-2.2.dist-info}/WHEEL
File without changes
{webscout-2.1.dist-info → webscout-2.2.dist-info}/entry_points.txt
File without changes
{webscout-2.1.dist-info → webscout-2.2.dist-info}/top_level.txt
File without changes