webscout-6.1-py3-none-any.whl → webscout-6.2b0-py3-none-any.whl
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Potentially problematic release.
- webscout/AIauto.py +77 -259
- webscout/Agents/functioncall.py +2 -2
- webscout/Extra/autollama.py +37 -20
- webscout/Local/utils.py +37 -12
- webscout/Provider/Amigo.py +50 -37
- webscout/Provider/Deepseek.py +7 -6
- webscout/Provider/EDITEE.py +2 -2
- webscout/Provider/GPTWeb.py +1 -1
- webscout/Provider/NinjaChat.py +2 -2
- webscout/Provider/OLLAMA.py +1 -1
- webscout/Provider/Perplexity.py +1 -1
- webscout/Provider/Reka.py +12 -5
- webscout/Provider/TTI/AIuncensored.py +103 -0
- webscout/Provider/TTI/__init__.py +2 -2
- webscout/Provider/TeachAnything.py +0 -3
- webscout/Provider/__init__.py +4 -10
- webscout/Provider/cerebras.py +125 -118
- webscout/Provider/cleeai.py +1 -1
- webscout/Provider/felo_search.py +1 -1
- webscout/Provider/gaurish.py +41 -2
- webscout/Provider/geminiprorealtime.py +1 -1
- webscout/Provider/genspark.py +1 -1
- webscout/Provider/julius.py +4 -3
- webscout/Provider/learnfastai.py +1 -1
- webscout/Provider/promptrefine.py +3 -1
- webscout/Provider/turboseek.py +3 -8
- webscout/Provider/tutorai.py +1 -1
- webscout/__init__.py +2 -43
- webscout/tempid.py +4 -73
- webscout/version.py +1 -1
- webscout/webai.py +1 -1
- {webscout-6.1.dist-info → webscout-6.2b0.dist-info}/METADATA +36 -119
- {webscout-6.1.dist-info → webscout-6.2b0.dist-info}/RECORD +37 -43
- webscout/Provider/BasedGPT.py +0 -214
- webscout/Provider/ChatHub.py +0 -209
- webscout/Provider/TTI/amigo.py +0 -148
- webscout/Provider/aigames.py +0 -213
- webscout/Provider/bixin.py +0 -264
- webscout/Provider/xdash.py +0 -182
- webscout/websx_search.py +0 -19
- {webscout-6.1.dist-info → webscout-6.2b0.dist-info}/LICENSE.md +0 -0
- {webscout-6.1.dist-info → webscout-6.2b0.dist-info}/WHEEL +0 -0
- {webscout-6.1.dist-info → webscout-6.2b0.dist-info}/entry_points.txt +0 -0
- {webscout-6.1.dist-info → webscout-6.2b0.dist-info}/top_level.txt +0 -0
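For readers who want to reproduce a table like the one above locally, here is a minimal sketch using only the Python standard library. The wheel file names are assumptions (any two downloaded wheels work), and the sketch ignores renames and binary members, so counts may differ slightly from the registry's own diff tool.

```python
# Rough per-file +added/-removed counts between two wheels (sketch only).
# Assumes both wheels were downloaded next to this script.
import difflib
import zipfile

def wheel_lines(path: str) -> dict[str, list[str]]:
    """Map each archive member to its decoded text lines."""
    with zipfile.ZipFile(path) as zf:
        return {
            name: zf.read(name).decode("utf-8", errors="replace").splitlines()
            for name in zf.namelist()
            if not name.endswith("/")
        }

old = wheel_lines("webscout-6.1-py3-none-any.whl")    # assumed local file
new = wheel_lines("webscout-6.2b0-py3-none-any.whl")  # assumed local file

for name in sorted(set(old) | set(new)):
    added = removed = 0
    for line in difflib.unified_diff(old.get(name, []), new.get(name, []), lineterm=""):
        if line.startswith("+") and not line.startswith("+++"):
            added += 1
        elif line.startswith("-") and not line.startswith("---"):
            removed += 1
    if added or removed:
        print(f"{name} +{added} -{removed}")
```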
webscout/Provider/aigames.py
DELETED
@@ -1,213 +0,0 @@

````python
import requests
import uuid
import json

from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts
from webscout.AIbase import Provider

class AIGameIO(Provider):
    """
    A class to interact with the AI-Game.io API.
    """

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 600,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        system_prompt: str = "You are a Helpful ai"
    ):
        """
        Initializes the AI-Game.io API with given parameters.

        Args:
            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
            timeout (int, optional): Http request timeout. Defaults to 30.
            intro (str, optional): Conversation introductory prompt. Defaults to None.
            filepath (str, optional): Path to file containing conversation history. Defaults to None.
            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
            proxies (dict, optional): Http request proxies. Defaults to {}.
            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
            system_prompt (str, optional): System prompt for AI-Game.io.
                Defaults to "You are a Helpful ai".
        """
        self.session = requests.Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = 'https://stream-chat-blmeirpipa-uc.a.run.app/streamChat'
        self.stream_chunk_size = 64
        self.timeout = timeout
        self.last_response = {}
        self.system_prompt = system_prompt
        self.headers = {
            'authority': 'stream-chat-blmeirpipa-uc.a.run.app',
            'method': 'POST',
            'path': '/streamChat',
            'accept': 'text/event-stream',
            'content-type': 'application/json',
            'origin': 'https://www.ai-game.io',
            'priority': 'u=1, i',
            'referer': 'https://www.ai-game.io/',
        }

        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        self.session.headers.update(self.headers)
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        self.session.proxies = proxies

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> dict:
        """Chat with AI

        Args:
            prompt (str): Prompt to be send.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            raw (bool, optional): Stream back raw response as received. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            dict : {}
        ```json
        {
            "text" : "How may I assist you today?"
        }
        ```
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        payload = {
            "history": [
                {
                    "role": "system",
                    "content": self.system_prompt
                },
                {
                    "role": "user",
                    "content": conversation_prompt
                }
            ]
        }

        def for_stream():
            response = self.session.post(
                self.api_endpoint, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout
            )
            if not response.ok:
                raise Exception(
                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                )

            full_response = ''
            for line in response.iter_lines(decode_unicode=True):
                if line.startswith("data: "):
                    try:
                        event_data = json.loads(line[6:])
                        if event_data['event'] == 'text-chunk':
                            full_response += event_data['data']['text']
                            yield event_data['data']['text'] if raw else dict(text=full_response)
                    except json.JSONDecodeError:
                        pass
            self.last_response.update(dict(text=full_response))
            self.conversation.update_chat_history(
                prompt, self.get_message(self.last_response)
            )

        def for_non_stream():
            for _ in for_stream():
                pass
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str:
        """Generate response `str`
        Args:
            prompt (str): Prompt to be send.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            str: Response generated
        """

        def for_stream():
            for response in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt,
                    False,
                    optimizer=optimizer,
                    conversationally=conversationally,
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Retrieves message only from response

        Args:
            response (dict): Response generated by `self.ask`

        Returns:
            str: Message extracted
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]


if __name__ == "__main__":
    from rich import print

    ai = AIGameIO()
    response = ai.chat("hi")
    for chunk in response:
        print(chunk, end="", flush=True)
````
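One quirk worth flagging in the deleted `__main__` block: `ai.chat("hi")` is called without `stream=True`, so it returns the finished reply as a single `str`, and the `for chunk in response` loop then iterates it one character at a time. It prints correctly, but only by accident. A sketch of the two call styles the class actually distinguished (this assumes an older webscout install, since the provider is gone in 6.2b0):

```python
# Sketch only: AIGameIO was removed in 6.2b0, so this assumes webscout <= 6.1.
from webscout.Provider.aigames import AIGameIO

ai = AIGameIO()

# Non-streaming: chat() returns the complete reply as one str.
print(ai.chat("hi"))

# Streaming: chat(stream=True) yields the reply accumulated so far after
# each "text-chunk" event (cumulative, not a delta, per ask() above).
for partial in ai.chat("hi", stream=True):
    print(partial)
```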
webscout/Provider/bixin.py
DELETED
@@ -1,264 +0,0 @@

````python
import requests
import json
import random
from typing import Any, Dict, Optional, Generator

from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts
from webscout.AIbase import Provider
from webscout import exceptions


class Bixin(Provider):
    """
    A class to interact with the Bixin API.
    """

    AVAILABLE_MODELS = [
        'gpt-3.5-turbo-0125', 'gpt-3.5-turbo-16k-0613', 'gpt-4-turbo', 'qwen-turbo'
    ]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 600,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        model: str = 'gpt-4-turbo',  # Default model
        system_prompt: str = "You are a helpful assistant.",
    ):
        """
        Initializes the Bixin API with given parameters.

        Args:
            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
            timeout (int, optional): Http request timeout. Defaults to 30.
            intro (str, optional): Conversation introductory prompt. Defaults to None.
            filepath (str, optional): Path to file containing conversation history. Defaults to None.
            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
            proxies (dict, optional): Http request proxies. Defaults to {}.
            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
            model (str, optional): AI model to use. Defaults to "gpt-4-turbo".
            system_prompt (str, optional): System prompt for Bixin.
                Defaults to "You are a helpful assistant.".
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.session = requests.Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://chat.bixin123.com/api/chatgpt/chat-process"
        self.stream_chunk_size = 1024
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.system_prompt = system_prompt
        self.headers = {
            "Accept": "application/json, text/plain, */*",
            "Accept-Language": "en-US,en;q=0.9",
            "Cache-Control": "no-cache",
            "Content-Type": "application/json",
            "Fingerprint": self.generate_fingerprint(),
            "Origin": "https://chat.bixin123.com",
            "Pragma": "no-cache",
            "Priority": "u=1, i",
            "Referer": "https://chat.bixin123.com/chat",
            "Sec-CH-UA": '"Chromium";v="127", "Not)A;Brand";v="99"',
            "Sec-CH-UA-Mobile": "?0",
            "Sec-CH-UA-Platform": '"Linux"',
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
            "X-Website-Domain": "chat.bixin123.com",
        }

        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        self.session.headers.update(self.headers)
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        self.session.proxies = proxies

    def generate_fingerprint(self) -> str:
        """
        Generates a random fingerprint number as a string.
        """
        return str(random.randint(100000000, 999999999))

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> dict:
        """Chat with Bixin

        Args:
            prompt (str): Prompt to be send.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            raw (bool, optional): Stream back raw response as received. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            dict : {}
        ```json
        {
            "text" : "How may I assist you today?"
        }
        ```
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        messages = [
            {"role": "system", "content": self.system_prompt},
            {"role": "user", "content": conversation_prompt},
        ]

        data = {
            "prompt": self.format_prompt(messages),
            "options": {
                "usingNetwork": False,
                "file": ""
            }
        }

        def for_stream():
            try:
                with requests.post(self.api_endpoint, headers=self.headers, json=data, stream=True, timeout=self.timeout) as response:
                    response.raise_for_status()

                    # Initialize variable to keep track of the last printed text
                    previous_text = ""

                    full_response = ''
                    for chunk in response.iter_content(chunk_size=self.stream_chunk_size, decode_unicode=True):
                        if chunk:
                            try:
                                json_chunk = json.loads(chunk)
                                text = json_chunk.get("text", "")

                                # Determine the new text to print
                                if text.startswith(previous_text):
                                    new_text = text[len(previous_text):]
                                    full_response += new_text
                                    yield new_text if raw else dict(text=new_text)
                                    previous_text = text
                                else:
                                    full_response += text
                                    yield text if raw else dict(text=full_response)
                                    previous_text = text
                            except json.JSONDecodeError:
                                # If the chunk isn't a complete JSON object, skip it
                                continue
                    self.last_response.update(dict(text=full_response))
                    self.conversation.update_chat_history(
                        prompt, self.get_message(self.last_response)
                    )
            except requests.RequestException as e:
                raise exceptions.FailedToGenerateResponseError(f"\nRequest failed: {e}")

        def for_non_stream():
            for _ in for_stream():
                pass
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def format_prompt(self, messages: list) -> str:
        """
        Formats the list of messages into a single prompt string.
        """
        formatted_messages = []
        for message in messages:
            role = message.get("role", "")
            content = message.get("content", "")
            formatted_messages.append(f"{role}: {content}")
        return "\n".join(formatted_messages)

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str:
        """Generate response `str`
        Args:
            prompt (str): Prompt to be send.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            str: Response generated
        """

        def for_stream():
            for response in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt,
                    False,
                    optimizer=optimizer,
                    conversationally=conversationally,
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Retrieves message only from response

        Args:
            response (dict): Response generated by `self.ask`

        Returns:
            str: Message extracted
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]

if __name__ == "__main__":
    from rich import print

    ai = Bixin(timeout=5000)
    response = ai.chat("write a poem about AI", stream=True)
    for chunk in response:
        print(chunk, end="", flush=True)
````
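The streaming loop in `Bixin.ask()` handles an API that re-sends the entire accumulated text in each JSON chunk rather than just the new piece; `previous_text` is used to emit only the unseen suffix. The same pattern in isolation (a sketch; `chunks` stands in for the `"text"` fields of successive decoded JSON chunks):

```python
# Delta extraction from cumulative streaming text, as in Bixin.ask().
# `chunks` is a stand-in for the "text" fields of successive JSON chunks.
chunks = ["Hel", "Hello", "Hello, wor", "Hello, world!"]

previous_text = ""
for text in chunks:
    if text.startswith(previous_text):
        # Emit only the suffix we have not seen yet.
        print(text[len(previous_text):], end="")
    else:
        # Fallback: the server restarted the text, emit it whole.
        print(text, end="")
    previous_text = text
print()  # -> Hello, world!
```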
webscout/Provider/xdash.py
DELETED
@@ -1,182 +0,0 @@

````python
import uuid
import requests
from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts, sanitize_stream
from webscout.AIbase import Provider, AsyncProvider
from webscout import exceptions
from typing import Any, AsyncGenerator, Dict

class XDASH(Provider):
    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 600,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
    ):
        """Instantiates XDASH

        Args:
            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
            timeout (int, optional): Http request timeout. Defaults to 30.
            intro (str, optional): Conversation introductory prompt. Defaults to None.
            filepath (str, optional): Path to file containing conversation history. Defaults to None.
            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
            proxies (dict, optional): Http request proxies. Defaults to {}.
            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
        """
        self.session = requests.Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.chat_endpoint = "https://www.xdash.ai/api/query"
        self.stream_chunk_size = 64
        self.timeout = timeout
        self.last_response = {}
        self.headers = {
            "accept": "*/*",
            "accept-encoding": "gzip, deflate, br, zstd",
            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
            "content-type": "application/json",
            "cookie": "cf_clearance=73aup_8JU0LU.tRr7D4qd4Kt7gapKFi3RVW8jLzQoP0-1723549451-1.0.1.1-HTRrjMvM5GRLsfCTB0v3N_UxQzQMfA1fvOSf0dsZJ73HR6.IUTH8BH.G1dpx3s_IxVHCBCHMXOCt0K7vyIwMgw",
            "dnt": "1",
            "origin": "https://www.xdash.ai",
            "priority": "u=1, i",
            "referer": "https://www.xdash.ai/search",
            "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0"
        }

        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        self.session.headers.update(self.headers)
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        self.session.proxies = proxies

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> dict:
        """Chat with AI

        Args:
            prompt (str): Prompt to be send.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            raw (bool, optional): Stream back raw response as received. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            dict : {}
        ```json
        {
            "text" : "How may I assist you today?"
        }
        ```
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        self.session.headers.update(self.headers)
        payload = {
            "query": conversation_prompt,
            "search_uuid": uuid.uuid4().hex,
            "visitor_uuid": uuid.uuid4().hex,
            "token": uuid.uuid4().hex
        }

        response = self.session.post(
            self.chat_endpoint, json=payload, timeout=self.timeout
        )
        if not response.ok:
            raise exceptions.FailedToGenerateResponseError(
                f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
            )

        # Extract the LLM response
        llm_response = response.text.split("__LLM_RESPONSE__")[1].split("__RELATED_QUESTIONS__")[0].strip()
        resp = dict(text=llm_response)
        self.last_response.update(resp)
        self.conversation.update_chat_history(
            prompt, self.get_message(self.last_response)
        )
        return self.last_response

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str:
        """Generate response `str`
        Args:
            prompt (str): Prompt to be send.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            str: Response generated
        """

        return self.get_message(
            self.ask(
                prompt,
                optimizer=optimizer,
                conversationally=conversationally,
            )
        )

    def get_message(self, response: dict) -> str:
        """Retrieves message only from response

        Args:
            response (dict): Response generated by `self.ask`

        Returns:
            str: Message extracted
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]

if __name__ == '__main__':
    from rich import print
    ai = XDASH()
    response = ai.chat("hi")
    for chunk in response:
        print(chunk, end="", flush=True)
````
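`XDASH.ask()` extracts the answer by slicing the response body between the `__LLM_RESPONSE__` and `__RELATED_QUESTIONS__` markers; if either marker is missing, `split(...)[1]` raises a bare `IndexError`. A more defensive version of just that step (a sketch, not code from the package):

```python
# Defensive variant of xdash's marker-based extraction (sketch only).
def extract_llm_response(body: str) -> str:
    start = "__LLM_RESPONSE__"
    end = "__RELATED_QUESTIONS__"
    if start not in body:
        raise ValueError("response did not contain __LLM_RESPONSE__")
    text = body.split(start, 1)[1]
    # The trailing marker may be absent; keep everything in that case.
    return text.split(end, 1)[0].strip()

print(extract_llm_response("x__LLM_RESPONSE__ hello __RELATED_QUESTIONS__y"))  # -> hello
```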
webscout/websx_search.py
DELETED
@@ -1,19 +0,0 @@

````python
import requests
from rich import print

def WEBSX(query):
    url = 'https://searx.bnngpt.com/api/v1/scrape/'
    data = {'query': query}
    response = requests.post(url, data=data)
    responses = response.json().get('responses')
    return responses

if __name__ == "__main__":
    # Example search query
    search_query = "Python development tools"

    # Call the WEBSX function with the search query
    result = WEBSX(search_query)

    # Pretty-print the JSON response
    print(result)
````
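`WEBSX` posts without a timeout and assumes a JSON body, so a hung endpoint blocks forever and a non-JSON error page fails deep inside `response.json()`. A hardened variant (a sketch under the same endpoint assumption; `websx_search` is a hypothetical name, not part of webscout):

```python
import requests

def websx_search(query: str, timeout: float = 30.0):
    """Like the removed WEBSX(), but with a timeout and explicit HTTP error handling."""
    url = "https://searx.bnngpt.com/api/v1/scrape/"
    response = requests.post(url, data={"query": query}, timeout=timeout)
    response.raise_for_status()  # surface HTTP errors instead of parsing an error page
    return response.json().get("responses")
```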
{webscout-6.1.dist-info → webscout-6.2b0.dist-info}/LICENSE.md
File without changes
{webscout-6.1.dist-info → webscout-6.2b0.dist-info}/WHEEL
File without changes
{webscout-6.1.dist-info → webscout-6.2b0.dist-info}/entry_points.txt
File without changes
{webscout-6.1.dist-info → webscout-6.2b0.dist-info}/top_level.txt
File without changes