webscout 4.5__py3-none-any.whl → 4.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of webscout might be problematic.
- webscout/AIutel.py +10 -0
- webscout/Extra/gguf.py +1 -1
- webscout/Provider/BasedGPT.py +38 -36
- webscout/Provider/Blackboxai.py +14 -10
- webscout/Provider/DARKAI.py +207 -0
- webscout/Provider/Deepseek.py +79 -133
- webscout/Provider/Llama3.py +173 -0
- webscout/Provider/PizzaGPT.py +178 -0
- webscout/Provider/RUBIKSAI.py +201 -0
- webscout/Provider/__init__.py +14 -3
- webscout/Provider/koala.py +239 -0
- webscout/Provider/meta.py +778 -0
- webscout/__init__.py +1 -0
- webscout/exceptions.py +6 -0
- webscout/version.py +1 -1
- webscout/webai.py +15 -1
- {webscout-4.5.dist-info → webscout-4.7.dist-info}/METADATA +40 -60
- {webscout-4.5.dist-info → webscout-4.7.dist-info}/RECORD +22 -16
- {webscout-4.5.dist-info → webscout-4.7.dist-info}/WHEEL +1 -1
- {webscout-4.5.dist-info → webscout-4.7.dist-info}/LICENSE.md +0 -0
- {webscout-4.5.dist-info → webscout-4.7.dist-info}/entry_points.txt +0 -0
- {webscout-4.5.dist-info → webscout-4.7.dist-info}/top_level.txt +0 -0
webscout/Provider/Deepseek.py
CHANGED
@@ -1,59 +1,42 @@
-import time
-import uuid
-from selenium import webdriver
-from selenium.webdriver.chrome.options import Options
-from selenium.webdriver.common.by import By
-from selenium.webdriver.support import expected_conditions as EC
-from selenium.webdriver.support.ui import WebDriverWait
-import click
 import requests
-from requests import get
-from uuid import uuid4
-from re import findall
-from requests.exceptions import RequestException
-from curl_cffi.requests import get, RequestsError
-import g4f
-from random import randint
-from PIL import Image
-import io
-import re
 import json
-import
+from typing import Any, AsyncGenerator, Dict
+
 from ..AIutel import Optimizers
 from ..AIutel import Conversation
 from ..AIutel import AwesomePrompts, sanitize_stream
 from ..AIbase import Provider, AsyncProvider
-from Helpingai_T2 import Perplexity
 from webscout import exceptions
-from typing import Any, AsyncGenerator, Dict, Optional
-import logging
-import httpx
-import os
-from dotenv import load_dotenv; load_dotenv()
 
-#-----------------------------------------------DeepSeek--------------------------------------------
 class DeepSeek(Provider):
+    """
+    A class to interact with the Deepseek API.
+    """
+
     def __init__(
         self,
-        api_key
+        api_key,
+        model: str = "deepseek_chat",  # deepseek_chat, deepseek_code
+        temperature: float = 0,
         is_conversation: bool = True,
-        max_tokens: int = 600,
         timeout: int = 30,
+        max_tokens: int = 4000,
         intro: str = None,
         filepath: str = None,
         update_file: bool = True,
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-
-
-
-        """Initializes DeepSeek
+    ) -> None:
+        """
+        Initializes the Deepseek API with given parameters.
 
         Args:
-
+            api_token (str): The API token for authentication.
+            api_endpoint (str): The API endpoint to use for requests.
+            model (str): The AI model to use for text generation.
+            temperature (float): The temperature parameter for the model.
             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
             timeout (int, optional): Http request timeout. Defaults to 30.
             intro (str, optional): Conversation introductory prompt. Defaults to None.
             filepath (str, optional): Path to file containing conversation history. Defaults to None.
@@ -61,28 +44,41 @@ class DeepSeek(Provider):
             proxies (dict, optional): Http request proxies. Defaults to {}.
            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            model_type (str, optional): DeepSeek model type. Defaults to 'deepseek_chat'.
-            temperature (float, optional): Creativity level of the response. Defaults to 1.0.
         """
         self.api_token = api_key
-        self.
-
-
-        self.
-        self.api_session = requests.Session()
-        self.api_session.headers.update(self.auth_headers)
-
+        self.api_endpoint = "https://chat.deepseek.com/api/v0/chat/completions"
+        self.model = model
+        self.temperature = temperature
+        self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.timeout = timeout
         self.last_response = {}
-        self.
-
+        self.headers = {
+            "authority": "chat.deepseek.com",
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "authorization": f"Bearer {self.api_token}",
+            "content-type": "application/json",
+            "dnt": "1",
+            "origin": "https://chat.deepseek.com",
+            "referer": "https://chat.deepseek.com",
+            "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
+            "x-app-version": "20240126.0"
+        }
         self.__available_optimizers = (
             method
             for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
+        self.session.headers.update(self.headers)
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -94,54 +90,7 @@ class DeepSeek(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-
-
-    def clear_chat(self) -> None:
-        """
-        Clears the chat context by making a POST request to the clear_context endpoint.
-        """
-        clear_payload = {"model_class": "deepseek_chat", "append_welcome_message": False}
-        clear_response = self.api_session.post(f'{self.api_base_url}/clear_context', json=clear_payload)
-        clear_response.raise_for_status()  # Raises an HTTPError if the HTTP request returned an unsuccessful status code
-
-    def generate(self, user_message: str, response_temperature: float = 1.0, model_type: Optional[str] = "deepseek_chat", verbose: bool = False) -> str:
-        """
-        Generates a response from the DeepSeek API based on the provided message.
-
-        Args:
-            user_message (str): The message to send to the chat API.
-            response_temperature (float, optional): The creativity level of the response. Defaults to 1.0.
-            model_type (str, optional): The model class to be used for the chat session.
-            verbose (bool, optional): Whether to print the response content. Defaults to False.
-
-        Returns:
-            str: The concatenated response content received from the API.
-
-        Available models:
-        - deepseek_chat
-        - deepseek_code
-        """
-        request_payload = {
-            "message": user_message,
-            "stream": True,
-            "model_preference": None,
-            "model_class": model_type,
-            "temperature": response_temperature
-        }
-        api_response = self.api_session.post(f'{self.api_base_url}/completions', json=request_payload, stream=True)
-        api_response.raise_for_status()
-
-        combined_response = ""
-        for response_line in api_response.iter_lines(decode_unicode=True, chunk_size=1):
-            if response_line:
-                cleaned_line = re.sub("data:", "", response_line)
-                response_json = json.loads(cleaned_line)
-                response_content = response_json['choices'][0]['delta']['content']
-                if response_content and not re.match(r'^\s{5,}$', response_content):
-                    if verbose: print(response_content, end="", flush=True)
-                    combined_response += response_content
-
-        return combined_response
+        self.session.proxies = proxies
 
     def ask(
         self,
@@ -150,40 +99,19 @@ class DeepSeek(Provider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) ->
-        """
+    ) -> Dict[str, Any]:
+        """
+        Sends a prompt to the Deepseek AI API and returns the response.
 
         Args:
-            prompt
-            stream (bool, optional):
-            raw (bool, optional):
-            optimizer (str, optional):
-            conversationally (bool, optional):
+            prompt: The text prompt to generate text from.
+            stream (bool, optional): Whether to stream the response. Defaults to False.
+            raw (bool, optional): Whether to return the raw response. Defaults to False.
+            optimizer (str, optional): The name of the optimizer to use. Defaults to None.
+            conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
+
         Returns:
-
-            ```json
-            {
-                "id": "chatcmpl-TaREJpBZsRVQFRFic1wIA7Q7XfnaD",
-                "object": "chat.completion",
-                "created": 1704623244,
-                "model": "gpt-3.5-turbo",
-                "usage": {
-                    "prompt_tokens": 0,
-                    "completion_tokens": 0,
-                    "total_tokens": 0
-                },
-                "choices": [
-                    {
-                        "message": {
-                            "role": "assistant",
-                            "content": "Hello! How can I assist you today?"
-                        },
-                        "finish_reason": "stop",
-                        "index": 0
-                    }
-                ]
-            }
-            ```
+            The response from the API.
         """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -195,23 +123,41 @@ class DeepSeek(Provider):
             raise Exception(
                 f"Optimizer is not one of {self.__available_optimizers}"
             )
+
+        payload = {
+            "message": conversation_prompt,
+            "stream": True,
+            "model_preference": None,
+            "model_class": self.model,
+            "temperature": self.temperature
+        }
 
         def for_stream():
-            response = self.
-
-                response_temperature=self.temperature,
-                model_type=self.model_type,
-                verbose=False,
+            response = self.session.post(
+                self.api_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
             )
-
-
+
+            if not response.ok:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason})"
+                )
+            streaming_response = ""
+            collected_messages = []
+            for line in response.iter_lines():
+                if line:
+                    json_line = json.loads(line.decode('utf-8').split('data: ')[1])
+                    if 'choices' in json_line and len(json_line['choices']) > 0:
+                        delta_content = json_line['choices'][0].get('delta', {}).get('content')
+                        if delta_content:
+                            collected_messages.append(delta_content)
+                            streaming_response = ''.join(collected_messages)
+                            yield delta_content if raw else dict(text=streaming_response)
+            self.last_response.update(dict(text=streaming_response))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
            )
-            yield dict(text=response) if raw else dict(text=response)
 
         def for_non_stream():
-            # let's make use of stream
             for _ in for_stream():
                 pass
             return self.last_response
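For orientation, a minimal usage sketch of the rewritten DeepSeek provider, based only on the hunks above (the token string is a placeholder; per the new `ask` implementation, each streamed chunk is a `dict(text=...)` whose `text` is the cumulative response so far):

```python
# Minimal sketch, assuming a valid chat.deepseek.com session token;
# "YOUR_DEEPSEEK_TOKEN" is a placeholder, not a real credential.
from webscout.Provider.Deepseek import DeepSeek

bot = DeepSeek(api_key="YOUR_DEEPSEEK_TOKEN", model="deepseek_chat", temperature=0)

last = {}
for chunk in bot.ask("Hello there", stream=True):
    last = chunk  # each chunk carries the cumulative text so far
print(last["text"])
```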
webscout/Provider/Llama3.py
ADDED
@@ -0,0 +1,173 @@
+import requests
+import json
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+
+class LLAMA3(Provider):
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "llama3-70b",  # model= llama3-70b, llama3-8b, llama3-405b
+        system: str = "Answer as concisely as possible.",
+    ):
+        """Instantiates Snova
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            model (str, optional): Snova model name. Defaults to "llama3-70b".
+            system (str, optional): System prompt for Snova. Defaults to "Answer as concisely as possible.".
+        """
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.model = model
+        self.system = system
+        self.last_response = {}
+        self.env_type = "tp16405b" if "405b" in model else "tp16"
+        self.headers = {'content-type': 'application/json'}
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "text" : "How may I assist you today?"
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+        data = {'body': {'messages': [{'role': 'system', 'content': self.system}, {'role': 'user', 'content': conversation_prompt}], 'stream': True, 'model': self.model}, 'env_type': self.env_type}
+
+        def for_stream(data=data):  # Pass data as a default argument
+            response = self.session.post('https://fast.snova.ai/api/completion', headers=self.headers, json=data, stream=True, timeout=self.timeout)
+            output = ''
+            for line in response.iter_lines(decode_unicode=True):
+                if line.startswith('data:'):
+                    try:
+                        data = json.loads(line[len('data: '):])
+                        output += data.get("choices", [{}])[0].get("delta", {}).get("content", '')
+                        self.last_response.update(dict(text=output))
+                        yield data if raw else dict(text=output)
+                    except json.JSONDecodeError:
+                        if line[len('data: '):] == '[DONE]':
+                            break
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
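Likewise, a minimal sketch of the new Snova-backed LLAMA3 provider (the import path is assumed from the file location; `chat()` returns a `str`, or a generator of cumulative strings when `stream=True`):

```python
# Minimal sketch; the import path is inferred from webscout/Provider/Llama3.py.
from webscout.Provider.Llama3 import LLAMA3

ai = LLAMA3(model="llama3-8b", system="Answer as concisely as possible.")
print(ai.chat("Name three prime numbers."))   # blocking call, returns str

for text in ai.chat("Count to five.", stream=True):
    print(text)  # cumulative text after each SSE chunk
```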
webscout/Provider/PizzaGPT.py
ADDED
@@ -0,0 +1,178 @@
+import requests
+from typing import Any, AsyncGenerator, Dict, Optional
+import json
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+
+
+class PIZZAGPT(Provider):
+    """
+    A class to interact with the PizzaGPT API.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ) -> None:
+        """
+        Initializes the PizzaGPT API with given parameters.
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+        """
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://www.pizzagpt.it/api/chatx-completion"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.headers = {
+            "accept": "application/json",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "content-length": "17",
+            "content-type": "application/json",
+            "dnt": "1",
+            "origin": "https://www.pizzagpt.it",
+            "priority": "u=1, i",
+            "referer": "https://www.pizzagpt.it/en",
+            "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
+            "x-secret": "Marinara"
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "text" : "How may I assist you today?"
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        self.session.headers.update(self.headers)
+        payload = {"question": conversation_prompt}
+
+        response = self.session.post(
+            self.api_endpoint, json=payload, timeout=self.timeout
+        )
+        if not response.ok:
+            raise exceptions.FailedToGenerateResponseError(
+                f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+            )
+
+        resp = response.json()
+        self.last_response.update(dict(text=resp['answer']['content']))
+        self.conversation.update_chat_history(
+            prompt, self.get_message(self.last_response)
+        )
+        return self.last_response  # Return the updated last_response
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        return self.get_message(
+            self.ask(
+                prompt,
+                optimizer=optimizer,
+                conversationally=conversationally,
+            )
+        )
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+if __name__ == "__main__":
+    print(PIZZAGPT().chat("hello"))
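For reference, the bare HTTP exchange that `PIZZAGPT.ask` wraps, as a sketch that assumes the endpoint and the `x-secret: Marinara` header behave exactly as in the class above:

```python
# Bare-requests sketch of the single POST that PIZZAGPT.ask performs;
# endpoint, payload shape, and headers are taken from the class above.
import requests

resp = requests.post(
    "https://www.pizzagpt.it/api/chatx-completion",
    json={"question": "hello"},
    headers={"content-type": "application/json", "x-secret": "Marinara"},
    timeout=30,
)
resp.raise_for_status()
print(resp.json()["answer"]["content"])
```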