webscout-5.4-py3-none-any.whl → webscout-5.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- webscout/Agents/Onlinesearcher.py +3 -3
- webscout/Agents/__init__.py +0 -1
- webscout/Agents/functioncall.py +3 -3
- webscout/Provider/Bing.py +243 -0
- webscout/Provider/Chatify.py +1 -1
- webscout/Provider/Cloudflare.py +1 -1
- webscout/Provider/DARKAI.py +1 -1
- webscout/Provider/DiscordRocks.py +109 -246
- webscout/Provider/Farfalle.py +1 -1
- webscout/Provider/Free2GPT.py +234 -0
- webscout/{Agents/ai.py → Provider/GPTWeb.py} +40 -33
- webscout/Provider/Llama3.py +65 -62
- webscout/Provider/OLLAMA.py +1 -1
- webscout/Provider/PizzaGPT.py +1 -1
- webscout/Provider/RUBIKSAI.py +13 -3
- webscout/Provider/Reka.py +0 -1
- webscout/Provider/TTI/Nexra.py +120 -0
- webscout/Provider/TTI/__init__.py +4 -1
- webscout/Provider/TTI/aiforce.py +137 -0
- webscout/Provider/TTI/blackboximage.py +153 -0
- webscout/Provider/TTI/deepinfra.py +2 -2
- webscout/Provider/TeachAnything.py +1 -1
- webscout/Provider/Youchat.py +1 -1
- webscout/Provider/__init__.py +11 -6
- webscout/Provider/{NetFly.py → aigames.py} +76 -79
- webscout/Provider/cleeai.py +1 -1
- webscout/Provider/elmo.py +1 -1
- webscout/Provider/felo_search.py +1 -1
- webscout/Provider/genspark.py +1 -1
- webscout/Provider/julius.py +7 -1
- webscout/Provider/lepton.py +1 -1
- webscout/Provider/meta.py +1 -1
- webscout/Provider/turboseek.py +1 -1
- webscout/Provider/upstage.py +230 -0
- webscout/Provider/x0gpt.py +1 -1
- webscout/Provider/xdash.py +1 -1
- webscout/Provider/yep.py +2 -2
- webscout/tempid.py +46 -2
- webscout/version.py +1 -1
- webscout/webai.py +1 -1
- webscout/webscout_search_async.py +9 -9
- {webscout-5.4.dist-info → webscout-5.6.dist-info}/METADATA +7 -30
- {webscout-5.4.dist-info → webscout-5.6.dist-info}/RECORD +47 -42
- webscout/Provider/ThinkAnyAI.py +0 -219
- {webscout-5.4.dist-info → webscout-5.6.dist-info}/LICENSE.md +0 -0
- {webscout-5.4.dist-info → webscout-5.6.dist-info}/WHEEL +0 -0
- {webscout-5.4.dist-info → webscout-5.6.dist-info}/entry_points.txt +0 -0
- {webscout-5.4.dist-info → webscout-5.6.dist-info}/top_level.txt +0 -0
webscout/Provider/DiscordRocks.py
CHANGED

@@ -1,132 +1,41 @@
+from typing import Any, Dict
 import requests
 import json
-from typing import Any, AsyncGenerator, Dict

 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
-from webscout.AIbase import
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
 from webscout import exceptions

-
 class DiscordRocks(Provider):
     """
-    A class to interact with the
+    A class to interact with the Airforce API.
     """

     AVAILABLE_MODELS = [
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        "llama-3-70b-chat",
-        "llama-3-70b-chat-turbo",
-        "llama-3-8b-chat",
-        "llama-3-8b-chat-turbo",
-        "llama-3-70b-chat-lite",
-        "llama-3-8b-chat-lite",
-        "llama-2-70b-chat",
-        "llama-2-13b-chat",
-        "llama-2-7b-chat",
-        "llama-3.1-405b-turbo",
-        "llama-3.1-70b-turbo",
-        "llama-3.1-8b-turbo",
-        "LlamaGuard-2-8b",
-        "Yi-34B-Chat",
-        "Yi-34B",
-        "Yi-6B",
-        "Mixtral-8x7B-v0.1",
-        "Mixtral-8x22B",
-        "Mixtral-8x7B-Instruct-v0.1",
-        "Mixtral-8x22B-Instruct-v0.1",
-        "Mistral-7B-Instruct-v0.1",
-        "Mistral-7B-Instruct-v0.2",
-        "Mistral-7B-Instruct-v0.3",
-        "openchat-3.5",
-        "WizardLM-13B-V1.2",
-        "WizardCoder-Python-34B-V1.0",
-        "Qwen1.5-0.5B-Chat",
-        "Qwen1.5-1.8B-Chat",
-        "Qwen1.5-4B-Chat",
-        "Qwen1.5-7B-Chat",
-        "Qwen1.5-14B-Chat",
-        "Qwen1.5-72B-Chat",
-        "Qwen1.5-110B-Chat",
-        "Qwen2-72B-Instruct",
-        "gemma-2b-it",
-        "gemma-7b-it",
-        "gemma-2b",
-        "gemma-7b",
-        "dbrx-instruct",
-        "vicuna-7b-v1.5",
-        "vicuna-13b-v1.5",
-        "dolphin-2.5-mixtral-8x7b",
-        "deepseek-coder-33b-instruct",
-        "deepseek-coder-67b-instruct",
-        "deepseek-llm-67b-chat",
-        "Nous-Capybara-7B-V1p9",
-        "Nous-Hermes-2-Mixtral-8x7B-DPO",
-        "Nous-Hermes-2-Mixtral-8x7B-SFT",
-        "Nous-Hermes-llama-2-7b",
-        "Nous-Hermes-Llama2-13b",
-        "Nous-Hermes-2-Yi-34B",
-        "Mistral-7B-OpenOrca",
-        "alpaca-7b",
-        "OpenHermes-2-Mistral-7B",
-        "OpenHermes-2.5-Mistral-7B",
-        "phi-2",
-        "phi-3",
-        "WizardLM-2-8x22B",
-        "NexusRaven-V2-13B",
-        "Phind-CodeLlama-34B-v2",
-        "CodeLlama-7b-Python-hf",
-        "CodeLlama-7b-Python",
-        "CodeLlama-13b-Python-hf",
-        "CodeLlama-34b-Python-hf",
-        "CodeLlama-70b-Python-hf",
-        "snowflake-arctic-instruct",
-        "SOLAR-10.7B-Instruct-v1.0",
-        "StripedHyena-Hessian-7B",
-        "StripedHyena-Nous-7B",
-        "Llama-2-7B-32K-Instruct",
-        "CodeLlama-13b-Instruct",
-        "evo-1-131k-base",
-        "OLMo-7B-Instruct",
-        "Platypus2-70B-instruct",
-        "Snorkel-Mistral-PairRM-DPO",
-        "ReMM-SLERP-L2-13B",
-        "MythoMax-L2-13b",
-        "chronos-hermes-13b",
-        "Llama-Guard-7b",
-        "gemma-2-9b-it",
-        "gemma-2-27b-it",
-        "Toppy-M-7B",
-        "gemini-1.5-flash",
-        "gemini-1.5-pro",
-        "gemini-1.0-pro",
-        "command-r+",
-        "sparkdesk"
+        'claude-3-haiku-20240307', 'claude-3-sonnet-20240229', 'claude-3-5-sonnet-20240620',
+        'claude-3-opus-20240229', 'chatgpt-4o-latest', 'gpt-4', 'gpt-4-0613', 'gpt-4-turbo',
+        'gpt-4o-mini-2024-07-18', 'gpt-4o-mini', 'gpt-3.5-turbo', 'gpt-3.5-turbo-0125',
+        'gpt-3.5-turbo-1106', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k-0613',
+        'gpt-4o', 'llama-3-70b-chat', 'llama-3-70b-chat-turbo', 'llama-3-8b-chat',
+        'llama-3-8b-chat-turbo', 'llama-3-70b-chat-lite', 'llama-3-8b-chat-lite',
+        'llama-2-13b-chat', 'llama-3.1-405b-turbo', 'llama-3.1-70b-turbo', 'llama-3.1-8b-turbo',
+        'LlamaGuard-2-8b', 'Llama-Guard-7b', 'Meta-Llama-Guard-3-8B', 'Mixtral-8x7B-v0.1',
+        'Mixtral-8x7B-Instruct-v0.1', 'Mixtral-8x22B-Instruct-v0.1', 'Mistral-7B-Instruct-v0.1',
+        'Mistral-7B-Instruct-v0.2', 'Mistral-7B-Instruct-v0.3', 'Qwen1.5-72B-Chat',
+        'Qwen1.5-110B-Chat', 'Qwen2-72B-Instruct', 'gemma-2b-it', 'dbrx-instruct',
+        'deepseek-coder-33b-instruct', 'deepseek-llm-67b-chat', 'Nous-Hermes-2-Mixtral-8x7B-DPO',
+        'Nous-Hermes-2-Yi-34B', 'WizardLM-2-8x22B', 'CodeLlama-7b-Python',
+        'snowflake-arctic-instruct', 'SOLAR-10.7B-Instruct-v1.0', 'StripedHyena-Nous-7B',
+        'CodeLlama-13b-Instruct', 'MythoMax-L2-13b', 'gemma-2-9b-it', 'gemma-2-27b-it',
+        'gemini-1.5-flash', 'gemini-1.5-pro', 'sparkdesk', 'cosmosrp'
     ]

     def __init__(
         self,
-        model: str = "llama-3.1-405b-turbo",
-        max_tokens: int = 4096,
-        temperature: float = 1,
-        top_p: float = 1,
         is_conversation: bool = True,
+        max_tokens: int = 600,
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -134,18 +43,17 @@ class DiscordRocks(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        system_prompt: str =
+        system_prompt: str = "You are a helpful AI assistant.",
+        model: str = "chatgpt-4o-latest",
+        temperature: float = 1,
+        top_p: float = 1,
     ):
         """
-        Initializes the
+        Initializes the Airforce API with given parameters.

         Args:
-            api_key (str): The API key for authentication.
-            model (str): The AI model to use for text generation. Defaults to "llama-3-70b-chat".
-            max_tokens (int): The maximum number of tokens to generate. Defaults to 4096.
-            temperature (float): The temperature parameter for the model. Defaults to 1.
-            top_p (float): The top_p parameter for the model. Defaults to 1.
             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
             timeout (int, optional): Http request timeout. Defaults to 30.
             intro (str, optional): Conversation introductory prompt. Defaults to None.
             filepath (str, optional): Path to file containing conversation history. Defaults to None.
@@ -153,41 +61,43 @@ class DiscordRocks(Provider):
             proxies (dict, optional): Http request proxies. Defaults to {}.
             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            system_prompt (str, optional): System prompt to
+            system_prompt (str, optional): System prompt for Airforce. Defaults to "You are a helpful AI assistant.".
+            model (str, optional): AI model to use. Defaults to "chatgpt-4o-latest".
+            temperature (float, optional): Temperature parameter for the model. Defaults to 1.
+            top_p (float, optional): Top_p parameter for the model. Defaults to 1.
         """
         if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f
+            raise ValueError(f'Error: Invalid model. Please choose from {self.AVAILABLE_MODELS}')

-
-        self.model = model
-        self.max_tokens = max_tokens
-        self.temperature = temperature
-        self.top_p = top_p
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://api.airforce/chat/completions"
+        self.stream_chunk_size = 1024
         self.timeout = timeout
         self.last_response = {}
-        self.
-        self.
+        self.system_prompt = system_prompt
+        self.model = model
+        self.temperature = temperature
+        self.top_p = top_p
         self.headers = {
-
-
-
-
-
-
-
-
-            "
-
-
-
-
-
-            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0"
+            'accept': '*/*',
+            'accept-encoding': 'gzip, deflate, br, zstd',
+            'accept-language': 'en-US,en;q=0.9,en-IN;q=0.8',
+            'authorization': 'Bearer missing api key',
+            'content-type': 'application/json',
+            'dnt': '1',
+            'origin': 'https://llmplayground.net',
+            'referer': 'https://llmplayground.net/',
+            'sec-ch-ua': '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"Windows"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'cross-site',
+            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0'
         }
+
         self.__available_optimizers = (
             method
             for method in dir(Optimizers)
@@ -206,7 +116,6 @@ class DiscordRocks(Provider):
         )
         self.conversation.history_offset = history_offset
         self.session.proxies = proxies
-        self.system_prompt = system_prompt # Store the system prompt

     def ask(
         self,
@@ -216,19 +125,7 @@ class DiscordRocks(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Dict[str, Any]:
-        """
-        Sends a prompt to the DiscordRocks AI API and returns the response.
-
-        Args:
-            prompt: The text prompt to generate text from.
-            stream (bool, optional): Whether to stream the response. Defaults to False.
-            raw (bool, optional): Whether to return the raw response. Defaults to False.
-            optimizer (str, optional): The name of the optimizer to use. Defaults to None.
-            conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
-
-        Returns:
-            The response from the API.
-        """
+        """Chat with AI"""
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
@@ -239,59 +136,65 @@ class DiscordRocks(Provider):
                 raise Exception(
                     f"Optimizer is not one of {self.__available_optimizers}"
                 )
-
+
+        # Define the payload
         payload = {
-
-
-
-
-
-
-            "temperature": self.temperature,
-            "top_p": self.top_p,
-            "stream": stream
+            'messages': [{'role': 'user', 'content': conversation_prompt}],
+            'model': self.model,
+            'max_tokens': self.max_tokens_to_sample,
+            'temperature': self.temperature,
+            'top_p': self.top_p,
+            'stream': stream
         }

         def for_stream():
-
-
-
-
-
-
-
+            try:
+                # Send the POST request
+                response = self.session.post(self.api_endpoint, headers=self.headers, json=payload, stream=True)
+
+                # Check if the request was successful
+                response.raise_for_status()
+
+                full_content = ''
+                for line in response.iter_lines():
+                    if line:
+                        decoded_line = line.decode('utf-8')
+                        if decoded_line.startswith('data:'):
+                            if decoded_line.strip() == 'data: [DONE]':
+                                break
+                            try:
+                                json_data = json.loads(decoded_line[5:])
+                                content = json_data['choices'][0]['delta'].get('content', '')
+                                if content:
+                                    full_content += content
+                                    yield content if raw else dict(text=full_content)
+                            except json.JSONDecodeError:
+                                print(f'Error decoding JSON: {decoded_line}')
+                            except KeyError:
+                                print(f'Unexpected JSON structure: {json_data}')
+                self.last_response.update(dict(text=full_content))
+                self.conversation.update_chat_history(
+                    prompt, self.get_message(self.last_response)
                 )
-
-
-                if line:
-                    try:
-                        json_line = json.loads(line.decode('utf-8').split('data: ')[1])
-                        content = json_line['choices'][0]['delta']['content']
-                        streaming_response += content
-                        yield content if raw else dict(text=streaming_response)
-                    except:
-                        continue
-            self.last_response.update(dict(text=streaming_response))
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
+            except requests.exceptions.RequestException as e:
+                raise exceptions.FailedToGenerateResponseError(f'An error occurred: {e}')

         def for_non_stream():
-
-
-
-
-
-
-
+            try:
+                # Send the POST request
+                response = self.session.post(self.api_endpoint, headers=self.headers, json=payload)
+
+                # Check if the request was successful
+                response.raise_for_status()
+
+                resp = response.json()
+                self.last_response.update(dict(text=resp.get("choices", [{}])[0].get('message', {}).get('content', '')))
+                self.conversation.update_chat_history(
+                    prompt, self.get_message(self.last_response)
                 )
-
-
-
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
-            return self.last_response
+                return self.last_response
+            except requests.exceptions.RequestException as e:
+                raise exceptions.FailedToGenerateResponseError(f'An error occurred: {e}')

         return for_stream() if stream else for_non_stream()

@@ -342,49 +245,9 @@ class DiscordRocks(Provider):
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]

-
-        self,
-        prompt: str,
-        model: str = "dall-e-3",
-        n: int = 1,
-        quality: str = "hd",
-        response_format: str = "url",
-        size: str = "1024x1024",
-    ) -> dict:
-        """
-        Generates an image using the DiscordRocks API.
-
-        Args:
-            prompt (str): The prompt describing the image to generate.
-            model (str, optional): The image generation model to use. Defaults to "dall-e-3".
-            n (int, optional): The number of images to generate. Defaults to 1.
-            quality (str, optional): The quality of the generated images ("standard", "hd"). Defaults to "hd".
-            response_format (str, optional): The response format ("url", "b64_json"). Defaults to "url".
-            size (str, optional): The size of the generated images ("256x256", "512x512", "1024x1024").
-                Defaults to "1024x1024".
-
-        Returns:
-            dict: A dictionary containing the response from the API, including the generated image URLs.
-        """
-        payload = {
-            "prompt": prompt,
-            "model": model,
-            "n": n,
-            "quality": quality,
-            "response format": response_format,
-            "size": size
-        }
-
-        response = self.session.post(self.images_generations_url, headers=self.headers, json=payload)
-        if not response.ok:
-            raise exceptions.FailedToGenerateResponseError(
-                f"Failed to generate image - ({response.status_code}, {response.reason})"
-            )
-
-        return response.json().get("data", [])
-if __name__ == "__main__":
+if __name__ == '__main__':
     from rich import print
     ai = DiscordRocks()
-    response = ai.chat("
+    response = ai.chat(input(">>> "))
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)
webscout/Provider/Farfalle.py
CHANGED