webscout 7.0__py3-none-any.whl → 7.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout has been flagged as possibly problematic by the registry; consult the registry's advisory page for details.
- webscout/AIauto.py +191 -191
- webscout/AIbase.py +122 -122
- webscout/AIutel.py +440 -440
- webscout/Bard.py +343 -161
- webscout/DWEBS.py +489 -492
- webscout/Extra/YTToolkit/YTdownloader.py +995 -995
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +476 -479
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +103 -103
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder_utiles.py +199 -199
- webscout/Extra/autocoder/rawdog.py +5 -7
- webscout/Extra/autollama.py +230 -230
- webscout/Extra/gguf.py +3 -3
- webscout/Extra/weather.py +171 -171
- webscout/LLM.py +442 -442
- webscout/Litlogger/__init__.py +67 -681
- webscout/Litlogger/core/__init__.py +6 -0
- webscout/Litlogger/core/level.py +20 -0
- webscout/Litlogger/core/logger.py +123 -0
- webscout/Litlogger/handlers/__init__.py +12 -0
- webscout/Litlogger/handlers/console.py +50 -0
- webscout/Litlogger/handlers/file.py +143 -0
- webscout/Litlogger/handlers/network.py +174 -0
- webscout/Litlogger/styles/__init__.py +7 -0
- webscout/Litlogger/styles/colors.py +231 -0
- webscout/Litlogger/styles/formats.py +377 -0
- webscout/Litlogger/styles/text.py +87 -0
- webscout/Litlogger/utils/__init__.py +6 -0
- webscout/Litlogger/utils/detectors.py +154 -0
- webscout/Litlogger/utils/formatters.py +200 -0
- webscout/Provider/AISEARCH/DeepFind.py +250 -250
- webscout/Provider/Blackboxai.py +136 -137
- webscout/Provider/ChatGPTGratis.py +226 -0
- webscout/Provider/Cloudflare.py +91 -78
- webscout/Provider/DeepSeek.py +218 -0
- webscout/Provider/Deepinfra.py +59 -35
- webscout/Provider/Free2GPT.py +131 -124
- webscout/Provider/Gemini.py +100 -115
- webscout/Provider/Glider.py +74 -59
- webscout/Provider/Groq.py +30 -18
- webscout/Provider/Jadve.py +108 -77
- webscout/Provider/Llama3.py +117 -94
- webscout/Provider/Marcus.py +191 -137
- webscout/Provider/Netwrck.py +62 -50
- webscout/Provider/PI.py +79 -124
- webscout/Provider/PizzaGPT.py +129 -83
- webscout/Provider/QwenLM.py +311 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
- webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
- webscout/Provider/TTI/Nexra/__init__.py +22 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
- webscout/Provider/TTI/artbit/__init__.py +22 -22
- webscout/Provider/TTI/artbit/async_artbit.py +184 -184
- webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
- webscout/Provider/TTI/blackbox/__init__.py +4 -4
- webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
- webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
- webscout/Provider/TTI/deepinfra/__init__.py +4 -4
- webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
- webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
- webscout/Provider/TTI/huggingface/__init__.py +22 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
- webscout/Provider/TTI/imgninza/__init__.py +4 -4
- webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
- webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
- webscout/Provider/TTI/talkai/__init__.py +4 -4
- webscout/Provider/TTI/talkai/async_talkai.py +229 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
- webscout/Provider/TTS/deepgram.py +182 -182
- webscout/Provider/TTS/elevenlabs.py +136 -136
- webscout/Provider/TTS/gesserit.py +150 -150
- webscout/Provider/TTS/murfai.py +138 -138
- webscout/Provider/TTS/parler.py +133 -134
- webscout/Provider/TTS/streamElements.py +360 -360
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TTS/voicepod.py +116 -116
- webscout/Provider/TextPollinationsAI.py +74 -47
- webscout/Provider/WiseCat.py +193 -0
- webscout/Provider/__init__.py +144 -136
- webscout/Provider/cerebras.py +242 -227
- webscout/Provider/chatglm.py +204 -204
- webscout/Provider/dgaf.py +67 -39
- webscout/Provider/gaurish.py +105 -66
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +223 -0
- webscout/Provider/hermes.py +218 -218
- webscout/Provider/llama3mitril.py +179 -179
- webscout/Provider/llamatutor.py +72 -62
- webscout/Provider/llmchat.py +60 -35
- webscout/Provider/meta.py +794 -794
- webscout/Provider/multichat.py +331 -230
- webscout/Provider/typegpt.py +359 -356
- webscout/Provider/yep.py +5 -5
- webscout/__main__.py +5 -5
- webscout/cli.py +319 -319
- webscout/conversation.py +241 -242
- webscout/exceptions.py +328 -328
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +2 -3
- webscout/litprinter/__init__.py +0 -58
- webscout/scout/__init__.py +8 -8
- webscout/scout/core.py +884 -884
- webscout/scout/element.py +459 -459
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +38 -38
- webscout/swiftcli/__init__.py +811 -811
- webscout/update_checker.py +2 -12
- webscout/version.py +1 -1
- webscout/webscout_search.py +1142 -1140
- webscout/webscout_search_async.py +635 -635
- webscout/zeroart/__init__.py +54 -54
- webscout/zeroart/base.py +60 -60
- webscout/zeroart/effects.py +99 -99
- webscout/zeroart/fonts.py +816 -816
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/METADATA +21 -28
- webscout-7.2.dist-info/RECORD +217 -0
- webstoken/__init__.py +30 -30
- webstoken/classifier.py +189 -189
- webstoken/keywords.py +216 -216
- webstoken/language.py +128 -128
- webstoken/ner.py +164 -164
- webstoken/normalizer.py +35 -35
- webstoken/processor.py +77 -77
- webstoken/sentiment.py +206 -206
- webstoken/stemmer.py +73 -73
- webstoken/tagger.py +60 -60
- webstoken/tokenizer.py +158 -158
- webscout/Provider/RUBIKSAI.py +0 -272
- webscout-7.0.dist-info/RECORD +0 -199
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/LICENSE.md +0 -0
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/WHEEL +0 -0
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/entry_points.txt +0 -0
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/top_level.txt +0 -0
|
"""
AsyncHFimager - async provider for generating images with the HuggingFace Inference API. ⚡

Examples:
    >>> from webscout import AsyncHFimager
    >>> import asyncio
    >>>
    >>> async def example():
    ...     # Initialize with your API key
    ...     provider = AsyncHFimager(api_token="your-hf-token")
    ...
    ...     # Generate a single image
    ...     images = await provider.generate("A shiny red sports car")
    ...     paths = await provider.save(images)
    ...
    ...     # Generate multiple images with parameters
    ...     images = await provider.generate(
    ...         prompt="Epic dragon in cyberpunk city",
    ...         amount=3,
    ...         model="runwayml/stable-diffusion-v1-5",
    ...         guidance_scale=7.5,
    ...         negative_prompt="blurry, bad quality",
    ...         num_inference_steps=50,
    ...         width=768,
    ...         height=768
    ...     )
    ...     paths = await provider.save(images, name="dragon", dir="outputs")
    >>>
    >>> # Run the example
    >>> asyncio.run(example())
"""

import os
import asyncio  # noqa: F401 — referenced by the doctest example above
from typing import Any, Dict, List, Optional

import aiohttp
import aiofiles

from webscout.AIbase import AsyncImageProvider
from webscout.Litlogger import Logger, LogFormat
from webscout.litagent import LitAgent

# Module-level logger and user-agent rotator shared by the provider class below.
logger = Logger("AsyncHuggingFace", format=LogFormat.MODERN_EMOJI)
agent = LitAgent()
class AsyncHFimager(AsyncImageProvider):
    """Async image provider backed by the HuggingFace Inference API. ⚡

    Sends prompts to ``https://api-inference.huggingface.co/models/<model>`` and
    returns/saves the raw image bytes the API responds with.
    """

    def __init__(
        self,
        api_token: Optional[str] = None,
        timeout: int = 60,
        proxies: Optional[dict] = None,
        logging: bool = True,
    ):
        """Initialize the async HuggingFace provider. ⚙️

        Args:
            api_token (str, optional): HuggingFace API token. Falls back to the
                "HUGGINGFACE_API_TOKEN" environment variable; a missing variable
                raises ``KeyError`` immediately, pointing at the misconfiguration.
            timeout (int): Per-request timeout in seconds (default: 60).
            proxies (dict, optional): Proxy settings for requests (default: none).
                A fresh dict is created per instance to avoid the shared
                mutable-default pitfall the old ``proxies: dict = {}`` had.
            logging (bool): Enable progress logging (default: True).
        """
        self.base_url = "https://api-inference.huggingface.co/models/"
        self.api_token = api_token or os.environ["HUGGINGFACE_API_TOKEN"]
        self.headers = {
            "Authorization": f"Bearer {self.api_token}",
            "User-Agent": agent.random(),
            "Accept": "application/json",
        }
        self.timeout = timeout
        self.proxies = proxies if proxies is not None else {}
        # Default name used by save() when the caller gives none.
        self.prompt: str = "AI-generated image - webscout"
        self.image_extension: str = "jpg"
        self.logging = logging
        if self.logging:
            logger.info("AsyncHuggingFace provider initialized! 🚀")

    async def generate(
        self,
        prompt: str,
        amount: int = 1,
        model: str = "stabilityai/stable-diffusion-xl-base-1.0",
        guidance_scale: Optional[float] = None,
        negative_prompt: Optional[str] = None,
        num_inference_steps: Optional[int] = None,
        width: Optional[int] = None,
        height: Optional[int] = None,
        scheduler: Optional[str] = None,
        seed: Optional[int] = None,
    ) -> List[bytes]:
        """Generate images asynchronously. ⚡

        Args:
            prompt (str): Image description.
            amount (int): How many images to generate (default: 1).
            model (str): Which model to use (default: "stabilityai/stable-diffusion-xl-base-1.0").
            guidance_scale (float, optional): How closely to follow the prompt.
            negative_prompt (str, optional): What to keep out of the image.
            num_inference_steps (int, optional): More steps = better quality but slower.
            width (int, optional): Image width (only sent together with height).
            height (int, optional): Image height (only sent together with width).
            scheduler (str, optional): Which scheduler to use.
            seed (int, optional): Random seed for reproducibility.

        Returns:
            List[bytes]: Generated images as raw bytes.

        Raises:
            ValueError: If prompt is empty or amount is not positive.
            TypeError: If amount is not an int.
            aiohttp.ClientError: If a request to the API fails.
        """
        # Explicit raises instead of the old asserts: asserts vanish under `python -O`.
        if not prompt:
            raise ValueError("Yo fam, prompt can't be empty! 🚫")
        if not isinstance(amount, int):
            raise TypeError(f"Amount gotta be an integer, not {type(amount)} 🤔")
        if amount <= 0:
            raise ValueError("Amount gotta be greater than 0! 📈")

        self.prompt = prompt
        if self.logging:
            logger.info(f"Generating {amount} images with {model}... 🎨")

        # url/payload are identical for every request — build them once, not per loop.
        url = self.base_url + model
        parameters: Dict[str, Any] = {}
        if guidance_scale is not None:
            parameters["guidance_scale"] = guidance_scale
        if negative_prompt is not None:
            parameters["negative_prompt"] = negative_prompt
        if num_inference_steps is not None:
            parameters["num_inference_steps"] = num_inference_steps
        if width is not None and height is not None:
            parameters["target_size"] = {"width": width, "height": height}
        if scheduler is not None:
            parameters["scheduler"] = scheduler
        if seed is not None:
            parameters["seed"] = seed

        payload: Dict[str, Any] = {"inputs": prompt}
        if parameters:
            payload["parameters"] = parameters

        response: List[bytes] = []
        async with aiohttp.ClientSession(headers=self.headers) as session:
            for _ in range(amount):
                try:
                    async with session.post(url, json=payload, timeout=self.timeout) as resp:
                        resp.raise_for_status()
                        response.append(await resp.read())
                        if self.logging:
                            logger.success("Image generated successfully! 🎉")
                except aiohttp.ClientError as e:
                    if self.logging:
                        logger.error(f"Failed to generate image: {e} 😢")
                    raise

        return response

    async def save(
        self,
        response: List[bytes],
        name: Optional[str] = None,
        dir: Optional[str] = None,
        filenames_prefix: str = "",
    ) -> List[str]:
        """Save generated images asynchronously. 💾

        Args:
            response (List[bytes]): Generated images.
            name (str, optional): Custom base name (default: the last prompt).
            dir (str, optional): Destination directory. Defaults to the current
                working directory *at call time* — the old ``dir=os.getcwd()``
                default was frozen when the module was imported.
            filenames_prefix (str, optional): Prefix added to the returned names.

        Returns:
            List[str]: Filenames (prefix + basename) the images were saved under.

        Raises:
            TypeError: If response is not a list.
        """
        if not isinstance(response, list):
            raise TypeError(f"Response gotta be a list, not {type(response)} 🤔")
        name = self.prompt if name is None else name
        save_dir = dir if dir is not None else os.getcwd()

        def _candidate(idx: int) -> str:
            # First file has no suffix; collisions get _1, _2, ...
            suffix = "" if idx == 0 else f"_{idx}"
            return os.path.join(save_dir, f"{name}{suffix}.{self.image_extension}")

        if self.logging:
            logger.info(f"Saving {len(response)} images... 💾")

        filenames: List[str] = []
        count = 0
        for image_bytes in response:
            # Advance past any files already on disk so nothing is overwritten.
            while os.path.isfile(_candidate(count)):
                count += 1
            absolute_path_to_file = _candidate(count)
            filenames.append(filenames_prefix + os.path.split(absolute_path_to_file)[1])

            async with aiofiles.open(absolute_path_to_file, "wb") as fh:
                await fh.write(image_bytes)

        if self.logging:
            logger.success(f"Images saved successfully! Check {save_dir} 🎉")
        return filenames