webscout 7.2__py3-none-any.whl → 7.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic. Click here for more details.
- webscout/Bard.py +2 -2
- webscout/Litlogger/core/level.py +3 -0
- webscout/Litlogger/core/logger.py +101 -58
- webscout/Litlogger/handlers/console.py +14 -31
- webscout/Litlogger/handlers/network.py +16 -17
- webscout/Litlogger/styles/colors.py +81 -63
- webscout/Litlogger/styles/formats.py +163 -80
- webscout/Provider/AISEARCH/ISou.py +277 -0
- webscout/Provider/AISEARCH/__init__.py +2 -1
- webscout/Provider/Deepinfra.py +40 -24
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -0
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +206 -0
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +192 -0
- webscout/Provider/TTI/__init__.py +2 -1
- webscout/Provider/TextPollinationsAI.py +26 -5
- webscout/Provider/__init__.py +2 -0
- webscout/Provider/freeaichat.py +221 -0
- webscout/Provider/yep.py +1 -1
- webscout/__init__.py +1 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +82 -2
- webscout/webscout_search_async.py +58 -1
- webscout/yep_search.py +297 -0
- {webscout-7.2.dist-info → webscout-7.3.dist-info}/METADATA +59 -20
- {webscout-7.2.dist-info → webscout-7.3.dist-info}/RECORD +29 -23
- {webscout-7.2.dist-info → webscout-7.3.dist-info}/WHEEL +1 -1
- {webscout-7.2.dist-info → webscout-7.3.dist-info}/LICENSE.md +0 -0
- {webscout-7.2.dist-info → webscout-7.3.dist-info}/entry_points.txt +0 -0
- {webscout-7.2.dist-info → webscout-7.3.dist-info}/top_level.txt +0 -0
webscout/Provider/Deepinfra.py
CHANGED
|
@@ -9,7 +9,8 @@ from webscout.AIutel import AwesomePrompts, sanitize_stream
|
|
|
9
9
|
from webscout.AIbase import Provider, AsyncProvider
|
|
10
10
|
from webscout import exceptions
|
|
11
11
|
from webscout import LitAgent
|
|
12
|
-
from webscout.Litlogger import Logger, LogFormat
|
|
12
|
+
from webscout.Litlogger import Logger, LogFormat, ConsoleHandler
|
|
13
|
+
from webscout.Litlogger.core.level import LogLevel
|
|
13
14
|
|
|
14
15
|
class DeepInfra(Provider):
|
|
15
16
|
"""
|
|
@@ -79,15 +80,20 @@ class DeepInfra(Provider):
|
|
|
79
80
|
)
|
|
80
81
|
self.conversation.history_offset = history_offset
|
|
81
82
|
|
|
82
|
-
# Initialize logger
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
83
|
+
# Initialize logger with proper configuration
|
|
84
|
+
if logging:
|
|
85
|
+
console_handler = ConsoleHandler(
|
|
86
|
+
level=LogLevel.DEBUG,
|
|
87
|
+
)
|
|
88
|
+
|
|
89
|
+
self.logger = Logger(
|
|
90
|
+
name="DeepInfra",
|
|
91
|
+
level=LogLevel.DEBUG,
|
|
92
|
+
handlers=[console_handler]
|
|
93
|
+
)
|
|
94
|
+
self.logger.info("DeepInfra initialized successfully ✨")
|
|
95
|
+
else:
|
|
96
|
+
self.logger = None
|
|
91
97
|
|
|
92
98
|
def ask(
|
|
93
99
|
self,
|
|
@@ -97,6 +103,9 @@ class DeepInfra(Provider):
|
|
|
97
103
|
optimizer: str = None,
|
|
98
104
|
conversationally: bool = False,
|
|
99
105
|
) -> Union[Dict[str, Any], Generator]:
|
|
106
|
+
if self.logger:
|
|
107
|
+
self.logger.debug(f"Processing request - Stream: {stream}, Optimizer: {optimizer}")
|
|
108
|
+
|
|
100
109
|
conversation_prompt = self.conversation.gen_complete_prompt(prompt)
|
|
101
110
|
if optimizer:
|
|
102
111
|
if optimizer in self.__available_optimizers:
|
|
@@ -104,7 +113,7 @@ class DeepInfra(Provider):
|
|
|
104
113
|
conversation_prompt if conversationally else prompt
|
|
105
114
|
)
|
|
106
115
|
if self.logger:
|
|
107
|
-
self.logger.
|
|
116
|
+
self.logger.info(f"Applied optimizer: {optimizer} 🔧")
|
|
108
117
|
else:
|
|
109
118
|
if self.logger:
|
|
110
119
|
self.logger.error(f"Invalid optimizer requested: {optimizer}")
|
|
@@ -120,25 +129,30 @@ class DeepInfra(Provider):
|
|
|
120
129
|
"stream": stream
|
|
121
130
|
}
|
|
122
131
|
|
|
132
|
+
if self.logger:
|
|
133
|
+
self.logger.debug(f"Sending request to model: {self.model} 🚀")
|
|
134
|
+
|
|
123
135
|
def for_stream():
|
|
124
136
|
if self.logger:
|
|
125
|
-
self.logger.
|
|
137
|
+
self.logger.info("Starting stream processing ⚡")
|
|
126
138
|
try:
|
|
127
139
|
with requests.post(self.url, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout) as response:
|
|
128
140
|
if response.status_code != 200:
|
|
129
141
|
if self.logger:
|
|
130
|
-
self.logger.error(f"Request failed with status
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
142
|
+
self.logger.error(f"Request failed with status {response.status_code} ❌")
|
|
143
|
+
raise exceptions.FailedToGenerateResponseError(
|
|
144
|
+
f"Request failed with status code {response.status_code}"
|
|
145
|
+
)
|
|
146
|
+
|
|
135
147
|
streaming_text = ""
|
|
136
148
|
for line in response.iter_lines(decode_unicode=True):
|
|
137
149
|
if line:
|
|
138
150
|
line = line.strip()
|
|
139
151
|
if line.startswith("data: "):
|
|
140
|
-
json_str = line[6:]
|
|
152
|
+
json_str = line[6:]
|
|
141
153
|
if json_str == "[DONE]":
|
|
154
|
+
if self.logger:
|
|
155
|
+
self.logger.info("Stream completed successfully ✅")
|
|
142
156
|
break
|
|
143
157
|
try:
|
|
144
158
|
json_data = json.loads(json_str)
|
|
@@ -151,17 +165,19 @@ class DeepInfra(Provider):
|
|
|
151
165
|
yield resp if raw else resp
|
|
152
166
|
except json.JSONDecodeError:
|
|
153
167
|
if self.logger:
|
|
154
|
-
self.logger.error("
|
|
155
|
-
|
|
168
|
+
self.logger.error("Failed to decode JSON response 🔥")
|
|
169
|
+
continue
|
|
170
|
+
|
|
156
171
|
self.conversation.update_chat_history(prompt, streaming_text)
|
|
157
|
-
|
|
158
|
-
self.logger.info("Streaming response completed successfully")
|
|
172
|
+
|
|
159
173
|
except requests.RequestException as e:
|
|
160
174
|
if self.logger:
|
|
161
|
-
self.logger.error(f"Request failed: {e}")
|
|
162
|
-
raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
|
|
175
|
+
self.logger.error(f"Request failed: {str(e)} 🔥")
|
|
176
|
+
raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
|
|
163
177
|
|
|
164
178
|
def for_non_stream():
|
|
179
|
+
if self.logger:
|
|
180
|
+
self.logger.debug("Processing non-stream request")
|
|
165
181
|
for _ in for_stream():
|
|
166
182
|
pass
|
|
167
183
|
return self.last_response
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
"""
|
|
2
|
+
FreeAI Provider Package
|
|
3
|
+
Provides access to various AI models for image generation including DALL-E 3 and Flux models
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from .sync_freeaiplayground import FreeAIImager
|
|
7
|
+
from .async_freeaiplayground import AsyncFreeAIImager
|
|
8
|
+
|
|
9
|
+
__all__ = ['FreeAIImager', 'AsyncFreeAIImager']
|
|
@@ -0,0 +1,206 @@
|
|
|
1
|
+
import aiohttp
|
|
2
|
+
import asyncio
|
|
3
|
+
import os
|
|
4
|
+
from typing import List, Union, AsyncGenerator
|
|
5
|
+
from string import punctuation
|
|
6
|
+
from random import choice
|
|
7
|
+
import aiofiles
|
|
8
|
+
|
|
9
|
+
from webscout.AIbase import AsyncImageProvider
|
|
10
|
+
from webscout.litagent import LitAgent # Import our fire user agent generator 🔥
|
|
11
|
+
from webscout.Litlogger import Logger # For that cyberpunk logging swag ⚡
|
|
12
|
+
|
|
13
|
+
# Initialize our fire logger 🚀
|
|
14
|
+
logger = Logger("AsyncFreeAIPlayground")
|
|
15
|
+
|
|
16
|
+
class AsyncFreeAIImager(AsyncImageProvider):
    """
    Async FreeAI Image Provider - Your go-to for fire AI art! 🎨

    Talks to the FreeAIChatPlayground image-generation endpoint, then
    downloads each returned image URL and hands back the raw bytes.
    """

    # Models accepted by the endpoint (not validated locally — the server
    # is the source of truth for this list).
    AVAILABLE_MODELS = [
        "dall-e-3",
        "Flux Pro Ultra",
        "Flux Pro",
        "Flux Pro Ultra Raw",
        "Flux Schnell",
        "Flux Realism",
        "grok-2-aurora"
    ]

    def __init__(
        self,
        model: str = "dall-e-3",
        timeout: int = 60,
        proxies: dict = None,
        logging: bool = True
    ):
        """Initialize your async FreeAIPlayground provider with custom settings! ⚙️

        Args:
            model (str): Which model to use (default: dall-e-3)
            timeout (int): Request timeout in seconds (default: 60)
            proxies (dict): Proxy settings for requests (default: None, treated as {})
            logging (bool): Enable fire logging (default: True)
        """
        self.image_gen_endpoint: str = "https://api.freeaichatplayground.com/v1/images/generations"
        self.headers = {
            "Accept": "application/json",
            "Accept-Language": "en-US,en;q=0.9",
            "Content-Type": "application/json",
            "User-Agent": LitAgent().random(),  # Using our fire random agent! 🔥
            "Origin": "https://freeaichatplayground.com",
            "Referer": "https://freeaichatplayground.com/",
        }
        self.timeout = aiohttp.ClientTimeout(total=timeout)
        self.model = model
        # Fixed: the previous signature used a mutable default ({}), which is
        # shared across every instance that relies on the default.
        # NOTE(review): proxies is stored but never handed to the aiohttp
        # session, so proxy settings are currently ignored here — confirm
        # whether per-request proxy support was intended.
        self.proxies = proxies if proxies is not None else {}
        self.prompt: str = "AI-generated image - webscout"
        self.image_extension: str = "png"
        self.logging = logging
        if self.logging:
            logger.info("AsyncFreeAIPlayground initialized! Ready to create some fire art! 🚀")

    async def generate(
        self, prompt: str, amount: int = 1, additives: bool = True,
        size: str = "1024x1024", quality: str = "standard",
        style: str = "vivid", max_retries: int = 3, retry_delay: int = 5
    ) -> List[bytes]:
        """Generate some fire images from your prompt! 🎨

        Args:
            prompt (str): Your creative prompt
            amount (int): How many images to generate
            additives (bool): Add random characters to make prompts unique
            size (str): Image size (1024x1024, 1024x1792, 1792x1024)
            quality (str): Image quality (standard, hd)
            style (str): Image style (vivid, natural)
            max_retries (int): Max retry attempts if generation fails
            retry_delay (int): Delay between retries in seconds

        Returns:
            List[bytes]: Your generated images as bytes

        Raises:
            Exception: Re-raises the last request error once max_retries is
                exhausted for any single image.
        """
        assert bool(prompt), "Prompt cannot be null"
        assert isinstance(amount, int), f"Amount should be an integer only not {type(amount)}"
        assert amount > 0, "Amount should be greater than 0"

        def ads() -> str:
            # Three random punctuation characters defeat server-side
            # de-duplication of identical prompts.
            if not additives:
                return ""
            return choice(punctuation) + choice(punctuation) + choice(punctuation)

        if self.logging:
            logger.info(f"Generating {amount} images... 🎨")

        self.prompt = prompt
        response = []

        async with aiohttp.ClientSession(headers=self.headers, timeout=self.timeout) as session:
            for _ in range(amount):
                payload = {
                    "model": self.model,
                    "prompt": prompt + ads(),
                    "n": 1,
                    "size": size,
                    "quality": quality,
                    "style": style
                }

                for attempt in range(max_retries):
                    try:
                        async with session.post(self.image_gen_endpoint, json=payload) as resp:
                            resp.raise_for_status()
                            data = await resp.json()
                            image_url = data['data'][0]['url']

                            # Fetch the actual image bytes from the returned URL.
                            async with session.get(image_url) as img_resp:
                                img_resp.raise_for_status()
                                response.append(await img_resp.read())

                            if self.logging:
                                logger.success(f"Generated image {len(response)}/{amount}! 🎨")
                        break
                    except Exception as e:
                        # Last attempt: surface the failure to the caller.
                        if attempt == max_retries - 1:
                            if self.logging:
                                logger.error(f"Failed to generate image after {max_retries} attempts: {e} 😢")
                            raise
                        if self.logging:
                            logger.warning(f"Attempt {attempt + 1} failed, retrying in {retry_delay}s... 🔄")
                        await asyncio.sleep(retry_delay)

        if self.logging:
            logger.success("All images generated successfully! 🎉")
        return response

    async def save(
        self,
        response: Union[List[bytes], AsyncGenerator[bytes, None]],
        name: str = None,
        dir: str = None,
        filenames_prefix: str = "",
    ) -> List[str]:
        """Save your fire images! 💾

        Args:
            response (Union[List[bytes], AsyncGenerator[bytes, None]]): Image data
            name (str, optional): Base name for saved files
            dir (str, optional): Where to save the images (default: current
                working directory, resolved at call time)
            filenames_prefix (str, optional): Prefix for filenames

        Returns:
            List[str]: List of saved filenames
        """
        # Fixed: the old default (dir=os.getcwd() in the signature) was
        # evaluated once at import time, so later chdir() calls were ignored.
        if dir is None:
            dir = os.getcwd()

        if not os.path.exists(dir):
            os.makedirs(dir)
            if self.logging:
                logger.info(f"Created directory: {dir} 📁")

        name = self.prompt if name is None else name

        async def save_single_image(image_bytes: bytes, index: int) -> str:
            # One file per image; the index keeps names unique within this batch.
            filename = f"{filenames_prefix}{name}_{index}.{self.image_extension}"
            filepath = os.path.join(dir, filename)

            async with aiofiles.open(filepath, "wb") as f:
                await f.write(image_bytes)

            if self.logging:
                logger.success(f"Saved image to: {filepath} 💾")
            return filename

        if isinstance(response, list):
            image_list = response
        else:
            # Drain an async generator into a concrete list first.
            image_list = [chunk async for chunk in response]

        if self.logging:
            logger.info(f"Saving {len(image_list)} images... 💾")

        # Write all files concurrently. (Also removed a dead
        # `saved_paths = []` assignment that was immediately overwritten.)
        tasks = [save_single_image(img, i) for i, img in enumerate(image_list)]
        saved_paths = await asyncio.gather(*tasks)

        if self.logging:
            logger.success(f"All images saved successfully! Check {dir} 🎉")
        return saved_paths
|
|
193
|
+
|
|
194
|
+
|
|
195
|
+
if __name__ == "__main__":
    # Manual smoke test: generate a single image and report the saved paths.
    async def main():
        imager = AsyncFreeAIImager()
        try:
            images = await imager.generate("A shiny red sports car speeding down a scenic mountain road", 1)
            saved = await imager.save(images)
            print(saved)
        except Exception as e:
            if imager.logging:
                logger.error(f"An error occurred: {e} 😢")

    asyncio.run(main())
|
|
@@ -0,0 +1,192 @@
|
|
|
1
|
+
import requests
|
|
2
|
+
import os
|
|
3
|
+
from typing import List
|
|
4
|
+
from string import punctuation
|
|
5
|
+
from random import choice
|
|
6
|
+
from random import randint
|
|
7
|
+
import base64
|
|
8
|
+
|
|
9
|
+
from webscout.AIbase import ImageProvider
|
|
10
|
+
from webscout.litagent import LitAgent # Import our fire user agent generator 🔥
|
|
11
|
+
from webscout.Litlogger import Logger # For that cyberpunk logging swag ⚡
|
|
12
|
+
|
|
13
|
+
# Initialize our fire logger 🚀
|
|
14
|
+
logger = Logger("FreeAIPlayground")
|
|
15
|
+
|
|
16
|
+
class FreeAIImager(ImageProvider):
    """
    FreeAI Image Provider - Your go-to for fire AI art! 🎨

    Synchronous client for the FreeAIChatPlayground image-generation
    endpoint; returns the generated images as raw bytes.
    """

    # Models accepted by the endpoint (not validated locally — the server
    # is the source of truth for this list).
    AVAILABLE_MODELS = [
        "dall-e-3",
        "Flux Pro Ultra",
        "Flux Pro",
        "Flux Pro Ultra Raw",
        "Flux Schnell",
        "Flux Realism",
        "grok-2-aurora"
    ]

    def __init__(
        self,
        model: str = "dall-e-3",
        timeout: int = 60,
        proxies: dict = None,
        logging: bool = True
    ):
        """Initialize your FreeAIPlayground provider with custom settings! ⚙️

        Args:
            model (str): Which model to use (default: dall-e-3)
            timeout (int): Request timeout in seconds (default: 60)
            proxies (dict): Proxy settings for requests (default: None, treated as {})
            logging (bool): Enable fire logging (default: True)
        """
        self.image_gen_endpoint: str = "https://api.freeaichatplayground.com/v1/images/generations"
        self.headers = {
            "Accept": "application/json",
            "Accept-Language": "en-US,en;q=0.9",
            "Content-Type": "application/json",
            "User-Agent": LitAgent().random(),  # Using our fire random agent! 🔥
            "Origin": "https://freeaichatplayground.com",
            "Referer": "https://freeaichatplayground.com/",
        }
        self.session = requests.Session()
        self.session.headers.update(self.headers)
        # Fixed: the previous signature used a mutable default ({}), which is
        # shared across every instance that relies on the default.
        self.session.proxies.update(proxies if proxies is not None else {})
        self.timeout = timeout
        self.model = model
        self.prompt: str = "AI-generated image - webscout"
        self.image_extension: str = "png"
        self.logging = logging
        if self.logging:
            logger.info("FreeAIPlayground initialized! Ready to create some fire art! 🚀")

    def generate(
        self, prompt: str, amount: int = 1, additives: bool = True,
        size: str = "1024x1024", quality: str = "standard",
        style: str = "vivid"
    ) -> List[bytes]:
        """Generate some fire images from your prompt! 🎨

        Args:
            prompt (str): Your creative prompt
            amount (int): How many images to generate
            additives (bool): Add random characters to make prompts unique
            size (str): Image size (1024x1024, 1024x1792, 1792x1024)
            quality (str): Image quality (standard, hd)
            style (str): Image style (vivid, natural)

        Returns:
            List[bytes]: Your generated images as bytes

        Raises:
            Exception: Re-raises any request/parse error after logging it.
        """
        assert bool(prompt), "Prompt cannot be null"
        assert isinstance(amount, int), f"Amount should be an integer only not {type(amount)}"
        assert amount > 0, "Amount should be greater than 0"

        def ads() -> str:
            # Three random punctuation characters defeat server-side
            # de-duplication of identical prompts.
            if not additives:
                return ""
            return choice(punctuation) + choice(punctuation) + choice(punctuation)

        if self.logging:
            logger.info(f"Generating {amount} images... 🎨")

        self.prompt = prompt
        response = []
        for _ in range(amount):
            payload = {
                "model": self.model,
                "prompt": prompt + ads(),
                "n": 1,
                "size": size,
                "quality": quality,
                "style": style
            }
            try:
                resp = self.session.post(
                    url=self.image_gen_endpoint,
                    json=payload,
                    timeout=self.timeout
                )
                resp.raise_for_status()
                image_url = resp.json()['data'][0]['url']
                # Fetch the actual image bytes from the returned URL.
                img_resp = self.session.get(image_url, timeout=self.timeout)
                img_resp.raise_for_status()
                response.append(img_resp.content)
                if self.logging:
                    logger.success(f"Generated image {len(response)}/{amount}! 🎨")
            except Exception as e:
                if self.logging:
                    logger.error(f"Failed to generate image: {e} 😢")
                raise

        if self.logging:
            logger.success("All images generated successfully! 🎉")
        return response

    def save(
        self,
        response: List[bytes],
        name: str = None,
        dir: str = None,
        filenames_prefix: str = "",
    ) -> List[str]:
        """Save your fire images! 💾

        Args:
            response (List[bytes]): List of image data
            name (str, optional): Base name for saved files
            dir (str, optional): Where to save the images (default: current
                working directory, resolved at call time)
            filenames_prefix (str, optional): Prefix for filenames

        Returns:
            List[str]: List of saved filenames
        """
        assert isinstance(response, list), f"Response should be of {list} not {type(response)}"
        name = self.prompt if name is None else name

        # Fixed: the old default (dir=os.getcwd() in the signature) was
        # evaluated once at import time, so later chdir() calls were ignored.
        if dir is None:
            dir = os.getcwd()

        if not os.path.exists(dir):
            os.makedirs(dir)
            if self.logging:
                logger.info(f"Created directory: {dir} 📁")

        if self.logging:
            logger.info(f"Saving {len(response)} images... 💾")

        filenames = []
        count = 0
        for image in response:
            def complete_path():
                # "" for the first file, "_1", "_2", ... for the rest.
                count_value = "" if count == 0 else f"_{count}"
                return os.path.join(dir, name + count_value + "." + self.image_extension)

            # Skip past any files that already exist on disk so nothing
            # gets overwritten.
            while os.path.isfile(complete_path()):
                count += 1

            absolute_path_to_file = complete_path()
            filenames.append(filenames_prefix + os.path.split(absolute_path_to_file)[1])

            with open(absolute_path_to_file, "wb") as fh:
                fh.write(image)
            if self.logging:
                logger.success(f"Saved image to: {absolute_path_to_file} 💾")

        if self.logging:
            logger.success(f"All images saved successfully! Check {dir} 🎉")
        return filenames
|
|
183
|
+
|
|
184
|
+
|
|
185
|
+
if __name__ == "__main__":
    # Manual smoke test: generate one image and print where it was saved.
    imager = FreeAIImager()
    try:
        generated = imager.generate("A shiny red sports car speeding down a scenic mountain road", 1)
        saved_files = imager.save(generated)
        print(saved_files)
    except Exception as e:
        if imager.logging:
            logger.error(f"An error occurred: {e} 😢")
|
|
@@ -1,3 +1,4 @@
|
|
|
1
|
+
from .FreeAIPlayground import *
|
|
1
2
|
from .deepinfra import *
|
|
2
3
|
from .PollinationsAI import *
|
|
3
4
|
from .AiForce import *
|
|
@@ -6,4 +7,4 @@ from .Nexra import *
|
|
|
6
7
|
from .huggingface import *
|
|
7
8
|
from .artbit import *
|
|
8
9
|
from .imgninza import *
|
|
9
|
-
from .talkai import *
|
|
10
|
+
from .talkai import *
|
|
@@ -13,10 +13,30 @@ class TextPollinationsAI(Provider):
|
|
|
13
13
|
"""
|
|
14
14
|
|
|
15
15
|
AVAILABLE_MODELS = [
|
|
16
|
-
"openai",
|
|
17
|
-
"
|
|
18
|
-
"
|
|
19
|
-
"
|
|
16
|
+
"openai", # OpenAI GPT-4o-mini
|
|
17
|
+
"openai-large", # OpenAI GPT-4o
|
|
18
|
+
"openai-reasoning", # OpenAI o1-mini
|
|
19
|
+
"qwen-coder", # Qwen 2.5 Coder 32B
|
|
20
|
+
"llama", # Llama 3.3 70B
|
|
21
|
+
"mistral", # Mistral Nemo
|
|
22
|
+
"unity", # Unity with Mistral Large
|
|
23
|
+
"midijourney", # Midijourney musical transformer
|
|
24
|
+
"rtist", # Rtist image generator
|
|
25
|
+
"searchgpt", # SearchGPT with realtime search
|
|
26
|
+
"evil", # Evil Mode - Experimental
|
|
27
|
+
"deepseek", # DeepSeek-V3
|
|
28
|
+
"claude-hybridspace", # Claude Hybridspace
|
|
29
|
+
"deepseek-r1", # DeepSeek-R1 Distill Qwen 32B
|
|
30
|
+
"deepseek-reasoner", # DeepSeek R1 - Full
|
|
31
|
+
"llamalight", # Llama 3.1 8B Instruct
|
|
32
|
+
"llamaguard", # Llamaguard 7B AWQ
|
|
33
|
+
"gemini", # Gemini 2.0 Flash
|
|
34
|
+
"gemini-thinking", # Gemini 2.0 Flash Thinking
|
|
35
|
+
"hormoz", # Hormoz 8b
|
|
36
|
+
"hypnosis-tracy", # Hypnosis Tracy
|
|
37
|
+
"sur", # Sur AI Assistant
|
|
38
|
+
"sur-mistral", # Sur AI Assistant (Mistral)
|
|
39
|
+
"llama-scaleway" # Llama (Scaleway)
|
|
20
40
|
]
|
|
21
41
|
|
|
22
42
|
def __init__(
|
|
@@ -225,4 +245,5 @@ if __name__ == "__main__":
|
|
|
225
245
|
ai = TextPollinationsAI(model="deepseek-r1", logging=True)
|
|
226
246
|
response = ai.chat(input(">>> "), stream=True)
|
|
227
247
|
for chunk in response:
|
|
228
|
-
print(chunk, end="", flush=True)
|
|
248
|
+
print(chunk, end="", flush=True)
|
|
249
|
+
|
webscout/Provider/__init__.py
CHANGED
|
@@ -69,6 +69,7 @@ from .QwenLM import *
|
|
|
69
69
|
from .granite import *
|
|
70
70
|
from .WiseCat import *
|
|
71
71
|
from .DeepSeek import *
|
|
72
|
+
from .freeaichat import FreeAIChat
|
|
72
73
|
__all__ = [
|
|
73
74
|
'LLAMA',
|
|
74
75
|
'DeepSeek',
|
|
@@ -141,4 +142,5 @@ __all__ = [
|
|
|
141
142
|
'JadveOpenAI',
|
|
142
143
|
'ChatGLM',
|
|
143
144
|
'NousHermes',
|
|
145
|
+
'FreeAIChat',
|
|
144
146
|
]
|