webscout 5.7__py3-none-any.whl → 5.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic; see the registry's advisory page for more details.
- webscout/AIutel.py +76 -2
- webscout/Agents/Onlinesearcher.py +123 -115
- webscout/Provider/Amigo.py +265 -0
- webscout/Provider/ChatGPTES.py +239 -0
- webscout/Provider/Deepinfra.py +1 -1
- webscout/Provider/TTI/WebSimAI.py +142 -0
- webscout/Provider/TTI/__init__.py +5 -1
- webscout/Provider/TTI/aiforce.py +36 -13
- webscout/Provider/TTI/amigo.py +148 -0
- webscout/Provider/TTI/artbit.py +141 -0
- webscout/Provider/TTI/huggingface.py +155 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/parler.py +108 -0
- webscout/Provider/__init__.py +18 -0
- webscout/Provider/bixin.py +264 -0
- webscout/Provider/genspark.py +46 -43
- webscout/Provider/learnfastai.py +253 -0
- webscout/Provider/llamatutor.py +222 -0
- webscout/Provider/prefind.py +232 -0
- webscout/Provider/promptrefine.py +191 -0
- webscout/Provider/tutorai.py +354 -0
- webscout/Provider/twitterclone.py +260 -0
- webscout/__init__.py +1 -0
- webscout/version.py +1 -1
- {webscout-5.7.dist-info → webscout-5.9.dist-info}/METADATA +184 -89
- {webscout-5.7.dist-info → webscout-5.9.dist-info}/RECORD +30 -16
- {webscout-5.7.dist-info → webscout-5.9.dist-info}/LICENSE.md +0 -0
- {webscout-5.7.dist-info → webscout-5.9.dist-info}/WHEEL +0 -0
- {webscout-5.7.dist-info → webscout-5.9.dist-info}/entry_points.txt +0 -0
- {webscout-5.7.dist-info → webscout-5.9.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,239 @@
|
|
|
1
|
+
import requests
|
|
2
|
+
import re
|
|
3
|
+
import json
|
|
4
|
+
import os
|
|
5
|
+
from typing import List, Dict
|
|
6
|
+
from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
|
|
7
|
+
from webscout.AIbase import Provider
|
|
8
|
+
from webscout import exceptions
|
|
9
|
+
from rich import print
|
|
10
|
+
|
|
11
|
+
class ChatGPTES(Provider):
    """
    A class to interact with the ChatGPT.es API.

    Scrapes a WordPress nonce and post id from https://chatgpt.es/ and then
    posts chat messages to the site's ``admin-ajax.php`` endpoint.
    """

    # Model identifiers the remote endpoint is known to accept.
    SUPPORTED_MODELS = {
        'gpt-4o', 'gpt-4o-mini', 'chatgpt-4o-latest'
    }

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 600,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        model: str = "chatgpt-4o-latest",  # Default model
        system_prompt: str = "You are a helpful assistant.",
    ):
        """
        Initializes the ChatGPT.es API with given parameters.

        Args:
            is_conversation (bool, optional): Track conversation history. Defaults to True.
            max_tokens (int, optional): Maximum tokens to sample. Defaults to 600.
            timeout (int, optional): HTTP request timeout in seconds. Defaults to 30.
            intro (str, optional): Conversation introductory prompt. Defaults to None.
            filepath (str, optional): Path to a file for persisting conversation history. Defaults to None.
            update_file (bool, optional): Update the history file on each turn. Defaults to True.
            proxies (dict, optional): HTTP request proxies. Defaults to {}.
            history_offset (int, optional): Conversation history character limit. Defaults to 10250.
            act (str, optional): AwesomePrompts act name used as the intro. Defaults to None.
            model (str, optional): Model identifier; must be in SUPPORTED_MODELS.
                Defaults to "chatgpt-4o-latest".
            system_prompt (str, optional): System prompt. Defaults to "You are a helpful assistant.".

        Raises:
            ValueError: If `model` is not one of SUPPORTED_MODELS.
        """
        if model not in self.SUPPORTED_MODELS:
            raise ValueError(f"Unsupported model: {model}. Choose from: {self.SUPPORTED_MODELS}")

        self.session = requests.Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = 'https://chatgpt.es/wp-admin/admin-ajax.php'
        self.stream_chunk_size = 64
        self.timeout = timeout
        self.last_response = {}
        # NOTE(review): system_prompt is stored but not currently included in the
        # outgoing payload built in `ask` — confirm whether the endpoint supports it.
        self.system_prompt = system_prompt
        self.model = model
        # Headers used for the initial GET that scrapes nonce/post_id.
        self.initial_headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) '
                          'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36',
            'Referer': 'https://www.google.com/',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,'
                      'image/avif,image/webp,image/apng,*/*;q=0.8,'
                      'application/signed-exchange;v=b3;q=0.7',
        }
        # Headers used for the chat POST to admin-ajax.php.
        self.post_headers = {
            'User-Agent': self.initial_headers['User-Agent'],
            'Referer': 'https://chatgpt.es/',
            'Origin': 'https://chatgpt.es',
            'Accept': '*/*',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        }
        # Lazily populated by get_nonce_and_post_id() on the first `ask`.
        self.nonce = None
        self.post_id = None

        # FIX: materialize as a tuple. The original used a generator expression,
        # which is exhausted by the first `optimizer in ...` membership test in
        # `ask`, silently disabling optimizer validation on subsequent calls
        # (and making the error message's list(...) print []).
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )

        # Conversation setup
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        self.session.proxies = proxies

    def get_nonce_and_post_id(self):
        """
        Retrieves the nonce and post ID from the ChatGPT.es website.

        Raises:
            ConnectionError: If the initial page request fails.
            ValueError: If the nonce or post id cannot be parsed from the page.
        """
        try:
            response = self.session.get('https://chatgpt.es/', headers=self.initial_headers, timeout=self.timeout)
            response.raise_for_status()
        except requests.RequestException as e:
            raise ConnectionError(f"Failed to retrieve nonce and post_id: {e}")

        nonce_match = re.search(r'data-nonce="(.+?)"', response.text)
        post_id_match = re.search(r'data-post-id="(.+?)"', response.text)

        if not nonce_match or not post_id_match:
            raise ValueError("Failed to parse nonce or post_id from the response.")

        self.nonce = nonce_match.group(1)
        self.post_id = post_id_match.group(1)

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> dict:
        """
        Chat with ChatGPT.es

        Args:
            prompt (str): Prompt to be sent.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            raw (bool, optional): Stream back raw response as received. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.

        Returns:
            dict: Response dictionary.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                optimizer_func = getattr(Optimizers, optimizer)
                conversation_prompt = optimizer_func(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise ValueError(f"Optimizer '{optimizer}' is not supported. "
                                 f"Available optimizers: {list(self.__available_optimizers)}")

        # Retrieve nonce and post_id if they are not set
        if not self.nonce or not self.post_id:
            self.get_nonce_and_post_id()

        messages = [
            {"role": "user", "content": conversation_prompt},
        ]

        # Prepare conversation history
        conversation = ["Human: strictly respond in the same language as my prompt, preferably English"]
        for msg in messages:
            role = "Human" if msg['role'] == "user" else "AI"
            conversation.append(f"{role}: {msg['content']}")

        payload = {
            '_wpnonce': self.nonce,
            'post_id': self.post_id,
            'url': 'https://chatgpt.es',
            'action': 'wpaicg_chat_shortcode_message',
            'message': messages[-1]['content'],
            'bot_id': '0',
            'chatbot_identity': 'shortcode',
            # Random per-request client id, as the site's own widget does.
            'wpaicg_chat_client_id': os.urandom(5).hex(),
            'wpaicg_chat_history': json.dumps(conversation)
        }

        try:
            response = self.session.post(
                self.api_endpoint,
                headers=self.post_headers,
                data=payload,
                timeout=self.timeout
            )
            response.raise_for_status()
        except requests.RequestException as e:
            raise ConnectionError(f"Failed to send request to ChatGPT.es: {e}")

        try:
            response_data = response.json()
        except json.JSONDecodeError:
            raise ValueError(f"Invalid JSON response: {response.text}")

        if not isinstance(response_data, dict):
            raise TypeError(f"Expected response_data to be a dict, got {type(response_data)}")

        # Extract the message directly from the 'data' key
        message = response_data.get('data')
        if not isinstance(message, str):
            raise KeyError("Missing 'data' key in response or 'data' is not a string")

        self.last_response.update(dict(text=message))
        self.conversation.update_chat_history(
            prompt, self.get_message(self.last_response)
        )
        return self.last_response

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str:
        """
        Generate response as a string.

        Args:
            prompt (str): Prompt to be sent.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.

        Returns:
            str: Response generated.
        """
        response = self.ask(
            prompt,
            stream=stream,
            optimizer=optimizer,
            conversationally=conversationally,
        )
        return self.get_message(response)

    def get_message(self, response: dict) -> str:
        """
        Retrieves message only from response.

        Args:
            response (dict): Response generated by `self.ask`.

        Returns:
            str: Message extracted.
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]
|
233
|
+
|
|
234
|
+
if __name__ == "__main__":
    ai = ChatGPTES()
    # chat() returns the complete reply as a single string (the provider does
    # not stream), so print it directly instead of iterating it
    # character-by-character as if it were a chunk stream.
    response = ai.chat(input(">>> "))
    print(response)
|
|
239
|
+
|
webscout/Provider/Deepinfra.py
CHANGED
|
@@ -21,7 +21,7 @@ class DeepInfra(Provider):
|
|
|
21
21
|
proxies: dict = {},
|
|
22
22
|
history_offset: int = 10250,
|
|
23
23
|
act: str = None,
|
|
24
|
-
model: str = "Qwen/Qwen2-72B-Instruct",
|
|
24
|
+
model: str = "Qwen/Qwen2.5-72B-Instruct",
|
|
25
25
|
system_prompt: str = "You are a Helpful AI."
|
|
26
26
|
):
|
|
27
27
|
"""Instantiates DeepInfra
|
|
@@ -0,0 +1,142 @@
|
|
|
1
|
+
import requests
|
|
2
|
+
import os
|
|
3
|
+
from typing import List
|
|
4
|
+
|
|
5
|
+
from webscout.AIbase import ImageProvider
|
|
6
|
+
|
|
7
|
+
class WebSimAI(ImageProvider):
    """
    Image provider for WebSim.ai.
    """

    def __init__(self, timeout: int = 60, proxies: dict = {}):
        """Initializes the WebSimAI class.

        Args:
            timeout (int, optional): HTTP request timeout in seconds. Defaults to 60.
            proxies (dict, optional): HTTP request proxies (socks). Defaults to {}.
        """
        self.url = "https://websim.ai/api/image_gen"
        self.headers = {
            "Accept": "*/*",
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
            "Content-Type": "application/json",
            "User-Agent": (
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                "AppleWebKit/537.36 (KHTML, like Gecko) "
                "Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0"
            ),
            "Origin": "https://websim.ai",
            "Referer": "https://websim.ai/p/a5yvwmtj8qz6ayx4tlg1"
        }
        self.session = requests.Session()
        self.session.headers.update(self.headers)
        self.session.proxies.update(proxies)
        self.timeout = timeout
        # Last prompt; reused by save() as the default filename.
        self.prompt: str = "AI-generated image - Webscout"
        self.image_extension: str = "png"

    def generate(
        self,
        prompt: str,
        amount: int = 1,
        width: int = 1024,
        height: int = 756
    ) -> List[bytes]:
        """Generate image from prompt

        Args:
            prompt (str): Image description.
            amount (int, optional): Total images to be generated. Defaults to 1.
            width (int, optional): Width of the generated image. Defaults to 1024.
            height (int, optional): Height of the generated image. Defaults to 756.

        Returns:
            List[bytes]: List of generated images as bytes (empty on HTTP/request error).
        """
        assert bool(prompt), "Prompt cannot be null"
        assert isinstance(amount, int), f"Amount should be an integer only, not {type(amount)}"
        assert amount > 0, "Amount should be greater than 0"

        self.prompt = prompt
        response = []

        for _ in range(amount):
            payload = {
                "prompt": prompt,
                "width": width,
                "height": height,
                "site_id": "KcWvHOHNBP2PmWUYZ",
            }

            try:
                resp = self.session.post(self.url, headers=self.headers, json=payload, timeout=self.timeout)
                resp.raise_for_status()  # Raises HTTPError for bad responses

                response_data = resp.json()
                image_url = response_data.get("url")
                if not image_url:
                    print("No image URL found in the response.")
                    continue

                # FIX: bound the image download with the configured timeout
                # (the original call could hang indefinitely).
                image_response = requests.get(image_url, timeout=self.timeout)
                image_response.raise_for_status()
                response.append(image_response.content)

            except requests.exceptions.HTTPError as http_err:
                # FIX: report the failing HTTP response body. The original
                # formatted `response.text`, but `response` is the local list
                # of image bytes and has no `.text`, raising AttributeError
                # inside the handler. `http_err.response` is the Response that
                # failed raise_for_status(), whichever request it came from.
                print(f"HTTP error occurred: {http_err} - {http_err.response.text}")
                return []  # Return an empty list on error
            except requests.exceptions.RequestException as req_err:
                print(f"Request error occurred: {req_err}")
                return []  # Return an empty list on error

        return response

    def save(
        self,
        response: List[bytes],
        name: str = None,
        dir: str = os.getcwd(),
        filenames_prefix: str = "",
    ) -> List[str]:
        """Save generated images

        Args:
            response (List[bytes]): List of generated images as bytes.
            name (str): Filename for the images. Defaults to the last prompt.
            dir (str, optional): Directory for saving images. Defaults to os.getcwd().
            filenames_prefix (str, optional): String to be prefixed at each filename to be returned.

        Returns:
            List[str]: List of saved filenames.
        """
        assert isinstance(response, list), f"Response should be of {list} not {type(response)}"
        name = self.prompt if name is None else name

        filenames = []
        count = 0
        for image in response:
            # Probe for a free filename: name.png, name_1.png, name_2.png, ...
            def complete_path():
                count_value = "" if count == 0 else f"_{count}"
                return os.path.join(dir, name + count_value + "." + self.image_extension)

            while os.path.isfile(complete_path()):
                count += 1

            absolute_path_to_file = complete_path()
            filenames.append(filenames_prefix + os.path.split(absolute_path_to_file)[1])

            with open(absolute_path_to_file, "wb") as fh:
                fh.write(image)

        return filenames
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
if __name__ == "__main__":
    # Quick manual smoke test: generate one image and save it to the cwd.
    generator = WebSimAI()
    try:
        images = generator.generate("A shiny red sports car speeding down a scenic mountain road", 1)
        print(generator.save(images))
    except Exception as e:
        print(f"An error occurred: {e}")
|
webscout/Provider/TTI/aiforce.py
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
import requests
|
|
2
2
|
import os
|
|
3
3
|
import time
|
|
4
|
-
from typing import List
|
|
4
|
+
from typing import List, Optional
|
|
5
5
|
from string import punctuation
|
|
6
6
|
from random import choice
|
|
7
7
|
from requests.exceptions import RequestException
|
|
@@ -9,16 +9,29 @@ from requests.exceptions import RequestException
|
|
|
9
9
|
from webscout.AIbase import ImageProvider
|
|
10
10
|
|
|
11
11
|
class AiForceimagger(ImageProvider):
|
|
12
|
-
"""Image provider for
|
|
12
|
+
"""Image provider for Airforce API"""
|
|
13
|
+
|
|
14
|
+
AVAILABLE_MODELS = [
|
|
15
|
+
"flux",
|
|
16
|
+
"flux-realism",
|
|
17
|
+
"flux-anime",
|
|
18
|
+
"flux-3d",
|
|
19
|
+
"flux-disney",
|
|
20
|
+
"flux-pixel",
|
|
21
|
+
"flux-4o",
|
|
22
|
+
"any-dark"
|
|
23
|
+
]
|
|
13
24
|
|
|
14
25
|
def __init__(self, timeout: int = 60, proxies: dict = {}):
|
|
15
|
-
"""Initializes the
|
|
26
|
+
"""Initializes the AiForceimagger class.
|
|
16
27
|
|
|
17
28
|
Args:
|
|
29
|
+
api_token (str, optional): Your Airforce API token. If None, it will use the environment variable "AIRFORCE_API_TOKEN".
|
|
30
|
+
Defaults to None.
|
|
18
31
|
timeout (int, optional): HTTP request timeout in seconds. Defaults to 60.
|
|
19
32
|
proxies (dict, optional): HTTP request proxies (socks). Defaults to {}.
|
|
20
33
|
"""
|
|
21
|
-
self.
|
|
34
|
+
self.api_endpoint = "https://api.airforce/imagine2"
|
|
22
35
|
self.headers = {
|
|
23
36
|
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
|
|
24
37
|
"Accept-Language": "en-US,en;q=0.5",
|
|
@@ -33,18 +46,28 @@ class AiForceimagger(ImageProvider):
|
|
|
33
46
|
self.image_extension: str = "png"
|
|
34
47
|
|
|
35
48
|
def generate(
|
|
36
|
-
self,
|
|
37
|
-
|
|
49
|
+
self,
|
|
50
|
+
prompt: str,
|
|
51
|
+
amount: int = 1,
|
|
52
|
+
additives: bool = True,
|
|
53
|
+
model: str = "flux-realism",
|
|
54
|
+
width: int = 768,
|
|
55
|
+
height: int = 768,
|
|
56
|
+
seed: Optional[int] = None,
|
|
57
|
+
max_retries: int = 3,
|
|
58
|
+
retry_delay: int = 5
|
|
38
59
|
) -> List[bytes]:
|
|
39
60
|
"""Generate image from prompt
|
|
40
61
|
|
|
41
62
|
Args:
|
|
42
63
|
prompt (str): Image description.
|
|
43
|
-
amount (int): Total images to be generated. Defaults to 1.
|
|
64
|
+
amount (int, optional): Total images to be generated. Defaults to 1.
|
|
44
65
|
additives (bool, optional): Try to make each prompt unique. Defaults to True.
|
|
66
|
+
model (str, optional): The model to use for image generation.
|
|
67
|
+
Defaults to "flux". Available options: "flux", "flux-realism".
|
|
45
68
|
width (int, optional): Width of the generated image. Defaults to 768.
|
|
46
69
|
height (int, optional): Height of the generated image. Defaults to 768.
|
|
47
|
-
|
|
70
|
+
seed (int, optional): Seed for the random number generator. Defaults to None.
|
|
48
71
|
max_retries (int, optional): Maximum number of retry attempts. Defaults to 3.
|
|
49
72
|
retry_delay (int, optional): Delay between retries in seconds. Defaults to 5.
|
|
50
73
|
|
|
@@ -54,6 +77,7 @@ class AiForceimagger(ImageProvider):
|
|
|
54
77
|
assert bool(prompt), "Prompt cannot be null"
|
|
55
78
|
assert isinstance(amount, int), f"Amount should be an integer only not {type(amount)}"
|
|
56
79
|
assert amount > 0, "Amount should be greater than 0"
|
|
80
|
+
assert model in self.AVAILABLE_MODELS, f"Model should be one of {self.AVAILABLE_MODELS}"
|
|
57
81
|
|
|
58
82
|
ads = lambda: (
|
|
59
83
|
""
|
|
@@ -68,9 +92,9 @@ class AiForceimagger(ImageProvider):
|
|
|
68
92
|
self.prompt = prompt
|
|
69
93
|
response = []
|
|
70
94
|
for _ in range(amount):
|
|
71
|
-
url = self.
|
|
72
|
-
|
|
73
|
-
|
|
95
|
+
url = f"{self.api_endpoint}?model={model}&prompt={prompt}&size={width}:{height}"
|
|
96
|
+
if seed:
|
|
97
|
+
url += f"&seed={seed}"
|
|
74
98
|
|
|
75
99
|
for attempt in range(max_retries):
|
|
76
100
|
try:
|
|
@@ -127,11 +151,10 @@ class AiForceimagger(ImageProvider):
|
|
|
127
151
|
|
|
128
152
|
return filenames
|
|
129
153
|
|
|
130
|
-
|
|
131
154
|
if __name__ == "__main__":
|
|
132
155
|
bot = AiForceimagger()
|
|
133
156
|
try:
|
|
134
|
-
resp = bot.generate("
|
|
157
|
+
resp = bot.generate("A shiny red sports car speeding down a scenic mountain road", 1, model="flux-realism")
|
|
135
158
|
print(bot.save(resp))
|
|
136
159
|
except Exception as e:
|
|
137
160
|
print(f"An error occurred: {e}")
|
|
@@ -0,0 +1,148 @@
|
|
|
1
|
+
import requests
|
|
2
|
+
import json
|
|
3
|
+
import uuid
|
|
4
|
+
import os
|
|
5
|
+
from typing import List
|
|
6
|
+
|
|
7
|
+
from webscout.AIbase import ImageProvider
|
|
8
|
+
|
|
9
|
+
class AmigoImager(ImageProvider):
    """
    Image provider for AmigoChat.io.
    """
    # NOTE(review): "dalle-e-3" looks like a typo for "dall-e-3", but it may be
    # the literal identifier the remote API expects — verify before changing.
    AVAILABLE_MODELS = ["dalle-e-3", "flux-pro", "flux-realism"]

    def __init__(self, timeout: int = 60, proxies: dict = {}):
        """Initializes the AmigoImager class.

        Args:
            timeout (int, optional): HTTP request timeout in seconds. Defaults to 60.
            proxies (dict, optional): HTTP request proxies. Defaults to {}.
        """
        self.url = "https://api.amigochat.io/v1/images/generations"
        self.headers = {
            "Accept": "*/*",
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
            "Authorization": "Bearer ",  # Empty
            "Content-Type": "application/json; charset=utf-8",
            "DNT": "1",
            "Origin": "https://amigochat.io",
            "Referer": "https://amigochat.io/",
            "Sec-CH-UA": '"Microsoft Edge";v="129", "Not=A?Brand";v="8", "Chromium";v="129"',
            "Sec-CH-UA-Mobile": "?0",
            "Sec-CH-UA-Platform": '"Windows"',
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-site",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0",
            "X-Device-Language": "en-US",
            "X-Device-Platform": "web",
            "X-Device-UUID": str(uuid.uuid4()),
            "X-Device-Version": "1.0.22"
        }
        self.session = requests.Session()
        self.session.headers.update(self.headers)
        self.session.proxies.update(proxies)
        self.timeout = timeout
        # Last prompt; reused by save() as the default filename.
        self.prompt: str = "AI-generated image - webscout"
        self.image_extension: str = "png"

    def generate(self, prompt: str, amount: int = 1, model: str = "flux-pro") -> List[str]:
        """Generate image from prompt

        Args:
            prompt (str): Image description.
            amount (int, optional): Total images to be generated. Defaults to 1.
            model (str, optional): Model to use for generating images. Defaults to "flux-pro".

        Returns:
            List[str]: List of generated image URLs.
        """
        assert bool(prompt), "Prompt cannot be null"
        assert isinstance(amount, int), f"Amount should be an integer only not {type(amount)}"
        assert amount > 0, "Amount should be greater than 0"
        assert model in self.AVAILABLE_MODELS, f"Model should be one of {self.AVAILABLE_MODELS}"

        self.prompt = prompt
        response = []

        for _ in range(amount):
            # JSON payload for the request
            payload = {
                "prompt": prompt,
                "model": model,
                "personaId": "image-generator"
            }

            try:
                # FIX: send through the configured session. The original called
                # the module-level requests.post, which ignored the proxies set
                # on self.session. Headers are passed explicitly as before, so
                # the request itself is unchanged.
                resp = self.session.post(self.url, headers=self.headers, data=json.dumps(payload), timeout=self.timeout)
                resp.raise_for_status()

                # Process the response
                response_data = resp.json()
                image_url = response_data['data'][0]['url']
                response.append(image_url)

            except requests.exceptions.RequestException as e:
                print(f"An error occurred: {e}")
                raise

        return response

    def save(
        self,
        response: List[str],  # List of image URLs
        name: str = None,
        dir: str = os.getcwd(),
        filenames_prefix: str = "",
    ) -> List[str]:
        """Save generated images

        Args:
            response (List[str]): List of generated image URLs.
            name (str): Filename for the images. Defaults to the last prompt.
            dir (str, optional): Directory for saving images. Defaults to os.getcwd().
            filenames_prefix (str, optional): String to be prefixed at each filename to be returned.

        Returns:
            List[str]: List of saved filenames.
        """
        assert isinstance(response, list), f"Response should be of {list} not {type(response)}"
        name = self.prompt if name is None else name

        filenames = []
        count = 0
        for img_url in response:
            # Probe for a free filename: name.png, name_1.png, name_2.png, ...
            def complete_path():
                count_value = "" if count == 0 else f"_{count}"
                return os.path.join(dir, name + count_value + "." + self.image_extension)

            while os.path.isfile(complete_path()):
                count += 1

            absolute_path_to_file = complete_path()
            filenames.append(filenames_prefix + os.path.split(absolute_path_to_file)[1])

            # Download and save the image
            try:
                # FIX: honor the configured proxies on the download as well
                # (kept as a headerless module-level GET so no session headers
                # such as Authorization leak to the image host).
                img_response = requests.get(img_url, stream=True, timeout=self.timeout, proxies=self.session.proxies)
                img_response.raise_for_status()

                with open(absolute_path_to_file, "wb") as fh:
                    for chunk in img_response.iter_content(chunk_size=8192):
                        fh.write(chunk)
            except requests.exceptions.RequestException as e:
                print(f"An error occurred while downloading image from {img_url}: {e}")
                raise

        return filenames
|
|
140
|
+
|
|
141
|
+
# Example usage
if __name__ == "__main__":
    # Quick manual smoke test: generate one image URL and download it.
    imager = AmigoImager()
    try:
        urls = imager.generate("A shiny red sports car speeding down a scenic mountain road", 1)
        print(imager.save(urls))
    except Exception as e:
        print(f"An error occurred: {e}")
|