webscout-5.6-py3-none-any.whl → webscout-5.8-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- webscout/AIutel.py +76 -2
- webscout/Agents/Onlinesearcher.py +123 -115
- webscout/Provider/ChatGPTES.py +239 -0
- webscout/Provider/Deepinfra.py +1 -1
- webscout/Provider/TTI/__init__.py +3 -1
- webscout/Provider/TTI/aiforce.py +36 -13
- webscout/Provider/TTI/artbit.py +141 -0
- webscout/Provider/TTI/huggingface.py +155 -0
- webscout/Provider/__init__.py +13 -0
- webscout/Provider/bixin.py +264 -0
- webscout/Provider/genspark.py +46 -43
- webscout/Provider/llamatutor.py +222 -0
- webscout/Provider/meta.py +1 -1
- webscout/Provider/promptrefine.py +191 -0
- webscout/Provider/tutorai.py +354 -0
- webscout/Provider/twitterclone.py +260 -0
- webscout/__init__.py +2 -0
- webscout/requestsHTMLfix.py +775 -0
- webscout/version.py +1 -1
- {webscout-5.6.dist-info → webscout-5.8.dist-info}/METADATA +4 -3
- {webscout-5.6.dist-info → webscout-5.8.dist-info}/RECORD +25 -16
- {webscout-5.6.dist-info → webscout-5.8.dist-info}/LICENSE.md +0 -0
- {webscout-5.6.dist-info → webscout-5.8.dist-info}/WHEEL +0 -0
- {webscout-5.6.dist-info → webscout-5.8.dist-info}/entry_points.txt +0 -0
- {webscout-5.6.dist-info → webscout-5.8.dist-info}/top_level.txt +0 -0
webscout/Provider/genspark.py
CHANGED
@@ -1,13 +1,12 @@
 import cloudscraper
 from uuid import uuid4
 import json
-
+import re
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 
-
 class Genspark(Provider):
     """
     A class to interact with the Genspark.ai API.
@@ -25,7 +24,8 @@ class Genspark(Provider):
         history_offset: int = 10250,
         act: str = None,
     ) -> None:
-        """Instantiates Genspark
+        """
+        Instantiates Genspark
 
         Args:
             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
@@ -67,11 +67,11 @@ class Genspark(Provider):
             "session_id": uuid4().hex,
         }
 
-        self.__available_optimizers = (
+        self.__available_optimizers = [
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
+        ]
         self.session.headers.update(self.headers)
         Conversation.intro = (
             AwesomePrompts().get_act(
@@ -94,7 +94,8 @@ class Genspark(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> dict:
-        """Chat with AI
+        """
+        Chat with AI
 
         Args:
             prompt (str): Prompt to be send.
@@ -102,11 +103,12 @@ class Genspark(Provider):
             raw (bool, optional): Stream back raw response as received. Defaults to False.
             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+
         Returns:
-           dict : {}
+            dict : {}
        ```json
        {
-           "text" : "How may I assist you today?"
+            "text" : "How may I assist you today?"
        }
        ```
        """
@@ -121,11 +123,10 @@ class Genspark(Provider):
                 f"Optimizer is not one of {self.__available_optimizers}"
             )
 
-        self.url = (
-            f"https://www.genspark.ai/api/search/stream?query={conversation_prompt}"
-        )
+        self.url = f"https://www.genspark.ai/api/search/stream?query={conversation_prompt}"
 
         payload = {}
+
         def for_stream():
             response = self.session.post(
                 self.url,
@@ -135,26 +136,24 @@ class Genspark(Provider):
                 stream=True,
                 timeout=self.timeout,
             )
+            if not response.ok:
+                raise Exception(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
 
-
+            full_response = ""
             for line in response.iter_lines(decode_unicode=True):
                 if line:
-
-
-
-                    data
-
-
-
-
-
-
-                    ]
-                    partial_response = deep_dive_result["detailAnswer"]
-                    self.last_response.update(dict(text=new_content))
-                    yield new_content if raw else dict(text=new_content)
-                except json.JSONDecodeError:
-                    print(f"Skipping invalid JSON line: {line}")
+                    if line.startswith("data: "):
+                        try:
+                            data = json.loads(line[6:])
+                            if data.get("type") == "result_field" and data["field_name"] == "streaming_summary":
+                                full_response = data.get("field_value", "")
+                                yield full_response if raw else {"text": full_response}
+                        except json.JSONDecodeError as e:
+                            print(f"Error decoding JSON: {line} - {e}")
+
+            self.last_response.update({"text": full_response})
         self.conversation.update_chat_history(
             prompt, self.get_message(self.last_response)
         )
@@ -173,7 +172,8 @@ class Genspark(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> str:
-        """Generate response `str`
+        """
+        Generate response `str`
         Args:
             prompt (str): Prompt to be send.
             stream (bool, optional): Flag for streaming response. Defaults to False.
@@ -190,19 +190,14 @@ class Genspark(Provider):
             yield self.get_message(response)
 
         def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
-            )
+            response = self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
+            return self.get_message(response)
 
         return for_stream() if stream else for_non_stream()
 
     def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
+        """
+        Retrieves message only from response
 
         Args:
             response (dict): Response generated by `self.ask`
@@ -211,12 +206,20 @@ class Genspark(Provider):
             str: Message extracted
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
-
-
-
-
+        text = response.get('text', '')
+        # Remove footnote references from the text
+        text = re.sub(r"\[.*?\]\(.*?\)", "", text)
+        try:
+            # Attempt to parse the text as JSON
+            text_json = json.loads(text)
+            return text_json.get('detailAnswer', text)
+        except json.JSONDecodeError:
+            # If text is not JSON, return it as is
+            return text
+
+if __name__ == '__main__':
     from rich import print
     ai = Genspark()
-    response = ai.chat("
+    response = ai.chat(input(">>> "))
     for chunk in response:
         print(chunk, end="", flush=True)
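The practical effect of the `for_stream()` rewrite: each `streaming_summary` event carries a full snapshot of the answer so far, and the new code assigns it to `full_response` rather than appending deltas. A minimal sketch of that parsing against canned SSE lines (the event shape is taken from the diff; everything else here is illustrative):

```python
import json

def parse_genspark_stream(lines):
    """Yield successive answer snapshots from Genspark-style 'data: ...' SSE lines."""
    for line in lines:
        if not line or not line.startswith("data: "):
            continue
        try:
            payload = json.loads(line[6:])  # strip the "data: " prefix, as in the diff
        except json.JSONDecodeError:
            continue  # tolerate keep-alives and partial frames
        if payload.get("type") == "result_field" and payload.get("field_name") == "streaming_summary":
            yield payload.get("field_value", "")

# Illustrative frames only; real Genspark events carry more fields.
sample = [
    'data: {"type": "result_field", "field_name": "streaming_summary", "field_value": "Hel"}',
    'data: {"type": "result_field", "field_name": "streaming_summary", "field_value": "Hello!"}',
]
for snapshot in parse_genspark_stream(sample):
    print(snapshot)  # prints "Hel", then "Hello!": snapshots, not deltas
```

Note that the shipped code indexes `data["field_name"]` directly, so an event with `type == "result_field"` but no `field_name` key would raise `KeyError` rather than being skipped.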
webscout/Provider/llamatutor.py
ADDED
@@ -0,0 +1,222 @@
+import requests
+import json
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+
+class LlamaTutor(Provider):
+    """
+    A class to interact with the LlamaTutor API (Together.ai).
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        system_prompt: str = "You are a helpful AI assistant.",
+    ):
+        """
+        Initializes the LlamaTutor API with given parameters.
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            system_prompt (str, optional): System prompt for LlamaTutor.
+                Defaults to "You are a helpful AI assistant.".
+        """
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://llamatutor.together.ai/api/getChat"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.system_prompt = system_prompt
+        self.headers = {
+            "Content-Type": "application/json",
+            "Accept": "*/*",
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "DNT": "1",
+            "Origin": "https://llamatutor.together.ai",
+            "Referer": "https://llamatutor.together.ai/",
+            "Sec-Ch-Ua": '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
+            "Sec-Ch-Ua-Mobile": "?0",
+            "Sec-Ch-Ua-Platform": '"Windows"',
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0"
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with LlamaTutor
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "text" : "How may I assist you today?"
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "messages": [
+                {
+                    "role": "system",
+                    "content": self.system_prompt
+                },
+                {
+                    "role": "user",
+                    "content": conversation_prompt
+                }
+            ]
+        }
+
+        def for_stream():
+            try:
+                response = requests.post(self.api_endpoint, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout)
+                response.raise_for_status()
+
+                # Stream and process the response line by line
+                full_response = ''
+                for line in response.iter_lines(decode_unicode=True):
+                    if line:
+                        decoded_line = line.decode('utf-8')
+                        if decoded_line.startswith("data: "):
+                            json_data = json.loads(decoded_line[6:])
+                            if "text" in json_data:
+                                full_response += json_data["text"]
+                                yield json_data["text"] if raw else dict(text=full_response)
+
+                self.last_response.update(dict(text=full_response))
+                self.conversation.update_chat_history(
+                    prompt, self.get_message(self.last_response)
+                )
+
+            except requests.exceptions.HTTPError as http_err:
+                raise exceptions.FailedToGenerateResponseError(f"HTTP error occurred: {http_err}")
+            except requests.exceptions.RequestException as err:
+                raise exceptions.FailedToGenerateResponseError(f"An error occurred: {err}")
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+if __name__ == "__main__":
+    from rich import print
+
+    ai = LlamaTutor()
+    response = ai.chat(input(">>> "))
+    for chunk in response:
+        print(chunk, end="", flush=True)
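One runtime issue worth flagging in this new file: with `decode_unicode=True`, `iter_lines()` yields `str`, so the `line.decode('utf-8')` call inside `for_stream()` raises `AttributeError` on the first non-empty line. A corrected sketch of the streaming loop, assuming the same `data: `-prefixed JSON protocol the file targets:

```python
import json
import requests

def iter_llamatutor_text(response: requests.Response):
    """Yield text deltas from a streaming LlamaTutor-style response (protocol assumed from the diff)."""
    for line in response.iter_lines(decode_unicode=True):
        # decode_unicode=True already produces str, so no .decode() call is needed
        if line and line.startswith("data: "):
            json_data = json.loads(line[6:])
            if "text" in json_data:
                yield json_data["text"]  # LlamaTutor sends deltas, so callers accumulate
```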
webscout/Provider/promptrefine.py
ADDED
@@ -0,0 +1,191 @@
+import requests
+import uuid
+import json
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from fake_useragent import UserAgent
+
+class PromptRefine(Provider):
+    """
+    A class to interact with the PromptRefine API.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        system_prompt: str = "You are a helpful AI assistant.",
+        model: str = "openai/gpt-4o",  # Default model
+    ):
+        """
+        Initializes the PromptRefine API with given parameters.
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            system_prompt (str, optional): System prompt for PromptRefine. Defaults to "You are a helpful AI assistant.".
+            model (str, optional): Model to use for generation. Defaults to "openai/gpt-4o".
+        """
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = 'https://www.promptrefine.com/api/completion'
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.system_prompt = system_prompt
+        self.model = model
+        self.headers = {
+            'origin': 'https://www.promptrefine.com',
+            'referer': 'https://www.promptrefine.com/prompt/new',
+            'user-agent': UserAgent().random
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with PromptRefine
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "text" : "How may I assist you today?"
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt}
+            ],
+            "variables": {},
+            "parameters": {},
+            "model": self.model,
+            "userId": str(uuid.uuid4()),
+        }
+
+        def for_stream():
+            response = self.session.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout)
+            if not response.ok:
+                raise Exception(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            full_response = ""
+            for line in response.iter_lines(decode_unicode=True):
+                if line:
+                    full_response += line  # No need to decode here
+                    yield full_response if raw else dict(text=full_response)
+            self.last_response.update(dict(text=full_response))
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+        return self.get_message(
+            self.ask(
+                prompt,
+                optimizer=optimizer,
+                conversationally=conversationally,
+            )
+        )
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+if __name__ == '__main__':
+    from rich import print
+    ai = PromptRefine()
+    response = ai.chat(input(">>> "))
+    for chunk in response:
+        print(chunk, end="", flush=True)
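Two quirks of this new provider are visible in the diff: `chat()` accepts a `stream` flag but never forwards it to `ask()`, so `chat()` always blocks, and the `__main__` block then iterates the returned string character by character. A hypothetical way to stream with the code as shipped is to call `ask()` directly (class and module names come from the diff; the exact import path is assumed):

```python
# Hypothetical usage sketch; PromptRefine and its defaults come from the diff above.
from webscout.Provider.promptrefine import PromptRefine

ai = PromptRefine()  # model defaults to "openai/gpt-4o"
for chunk in ai.ask("Summarize HTTP/2 in one sentence.", stream=True):
    # each chunk is {"text": <full text so far>}, since ask() accumulates lines
    print(chunk["text"])
```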