webscout 7.1__py3-none-any.whl → 7.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic. Click here for more details.
- webscout/AIauto.py +191 -191
- webscout/AIbase.py +122 -122
- webscout/AIutel.py +440 -440
- webscout/Bard.py +343 -161
- webscout/DWEBS.py +489 -492
- webscout/Extra/YTToolkit/YTdownloader.py +995 -995
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +476 -479
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +103 -103
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder_utiles.py +199 -199
- webscout/Extra/autocoder/rawdog.py +5 -7
- webscout/Extra/autollama.py +230 -230
- webscout/Extra/gguf.py +3 -3
- webscout/Extra/weather.py +171 -171
- webscout/LLM.py +442 -442
- webscout/Litlogger/__init__.py +67 -681
- webscout/Litlogger/core/__init__.py +6 -0
- webscout/Litlogger/core/level.py +23 -0
- webscout/Litlogger/core/logger.py +166 -0
- webscout/Litlogger/handlers/__init__.py +12 -0
- webscout/Litlogger/handlers/console.py +33 -0
- webscout/Litlogger/handlers/file.py +143 -0
- webscout/Litlogger/handlers/network.py +173 -0
- webscout/Litlogger/styles/__init__.py +7 -0
- webscout/Litlogger/styles/colors.py +249 -0
- webscout/Litlogger/styles/formats.py +460 -0
- webscout/Litlogger/styles/text.py +87 -0
- webscout/Litlogger/utils/__init__.py +6 -0
- webscout/Litlogger/utils/detectors.py +154 -0
- webscout/Litlogger/utils/formatters.py +200 -0
- webscout/Provider/AISEARCH/DeepFind.py +250 -250
- webscout/Provider/AISEARCH/ISou.py +277 -0
- webscout/Provider/AISEARCH/__init__.py +2 -1
- webscout/Provider/Blackboxai.py +3 -3
- webscout/Provider/ChatGPTGratis.py +226 -0
- webscout/Provider/Cloudflare.py +3 -4
- webscout/Provider/DeepSeek.py +218 -0
- webscout/Provider/Deepinfra.py +40 -24
- webscout/Provider/Free2GPT.py +131 -124
- webscout/Provider/Gemini.py +100 -115
- webscout/Provider/Glider.py +3 -3
- webscout/Provider/Groq.py +5 -1
- webscout/Provider/Jadve.py +3 -3
- webscout/Provider/Marcus.py +191 -192
- webscout/Provider/Netwrck.py +3 -3
- webscout/Provider/PI.py +2 -2
- webscout/Provider/PizzaGPT.py +2 -3
- webscout/Provider/QwenLM.py +311 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
- webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -0
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +206 -0
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +192 -0
- webscout/Provider/TTI/Nexra/__init__.py +22 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
- webscout/Provider/TTI/__init__.py +2 -1
- webscout/Provider/TTI/artbit/__init__.py +22 -22
- webscout/Provider/TTI/artbit/async_artbit.py +184 -184
- webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
- webscout/Provider/TTI/blackbox/__init__.py +4 -4
- webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
- webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
- webscout/Provider/TTI/deepinfra/__init__.py +4 -4
- webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
- webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
- webscout/Provider/TTI/huggingface/__init__.py +22 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
- webscout/Provider/TTI/imgninza/__init__.py +4 -4
- webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
- webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
- webscout/Provider/TTI/talkai/__init__.py +4 -4
- webscout/Provider/TTI/talkai/async_talkai.py +229 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
- webscout/Provider/TTS/deepgram.py +182 -182
- webscout/Provider/TTS/elevenlabs.py +136 -136
- webscout/Provider/TTS/gesserit.py +150 -150
- webscout/Provider/TTS/murfai.py +138 -138
- webscout/Provider/TTS/parler.py +133 -134
- webscout/Provider/TTS/streamElements.py +360 -360
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TTS/voicepod.py +116 -116
- webscout/Provider/TextPollinationsAI.py +28 -8
- webscout/Provider/WiseCat.py +193 -0
- webscout/Provider/__init__.py +146 -134
- webscout/Provider/cerebras.py +242 -227
- webscout/Provider/chatglm.py +204 -204
- webscout/Provider/dgaf.py +2 -3
- webscout/Provider/freeaichat.py +221 -0
- webscout/Provider/gaurish.py +2 -3
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +223 -0
- webscout/Provider/hermes.py +218 -218
- webscout/Provider/llama3mitril.py +179 -179
- webscout/Provider/llamatutor.py +3 -3
- webscout/Provider/llmchat.py +2 -3
- webscout/Provider/meta.py +794 -794
- webscout/Provider/multichat.py +331 -331
- webscout/Provider/typegpt.py +359 -359
- webscout/Provider/yep.py +3 -3
- webscout/__init__.py +1 -0
- webscout/__main__.py +5 -5
- webscout/cli.py +319 -319
- webscout/conversation.py +241 -242
- webscout/exceptions.py +328 -328
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +2 -3
- webscout/litprinter/__init__.py +0 -58
- webscout/scout/__init__.py +8 -8
- webscout/scout/core.py +884 -884
- webscout/scout/element.py +459 -459
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +38 -38
- webscout/swiftcli/__init__.py +811 -811
- webscout/update_checker.py +2 -12
- webscout/version.py +1 -1
- webscout/webscout_search.py +87 -6
- webscout/webscout_search_async.py +58 -1
- webscout/yep_search.py +297 -0
- webscout/zeroart/__init__.py +54 -54
- webscout/zeroart/base.py +60 -60
- webscout/zeroart/effects.py +99 -99
- webscout/zeroart/fonts.py +816 -816
- {webscout-7.1.dist-info → webscout-7.3.dist-info}/METADATA +62 -22
- webscout-7.3.dist-info/RECORD +223 -0
- {webscout-7.1.dist-info → webscout-7.3.dist-info}/WHEEL +1 -1
- webstoken/__init__.py +30 -30
- webstoken/classifier.py +189 -189
- webstoken/keywords.py +216 -216
- webstoken/language.py +128 -128
- webstoken/ner.py +164 -164
- webstoken/normalizer.py +35 -35
- webstoken/processor.py +77 -77
- webstoken/sentiment.py +206 -206
- webstoken/stemmer.py +73 -73
- webstoken/tagger.py +60 -60
- webstoken/tokenizer.py +158 -158
- webscout-7.1.dist-info/RECORD +0 -198
- {webscout-7.1.dist-info → webscout-7.3.dist-info}/LICENSE.md +0 -0
- {webscout-7.1.dist-info → webscout-7.3.dist-info}/entry_points.txt +0 -0
- {webscout-7.1.dist-info → webscout-7.3.dist-info}/top_level.txt +0 -0
|
@@ -1,195 +1,195 @@
|
|
|
1
|
-
"""
|
|
2
|
-
HFimager - Your go-to provider for generating fire images with HuggingFace! 🔥
|
|
3
|
-
|
|
4
|
-
Examples:
|
|
5
|
-
>>> from webscout import HFimager
|
|
6
|
-
>>>
|
|
7
|
-
>>> # Initialize with your API key
|
|
8
|
-
>>> provider = HFimager(api_token="your-hf-token")
|
|
9
|
-
>>>
|
|
10
|
-
>>> # Generate a single image
|
|
11
|
-
>>> images = provider.generate("A shiny red sports car", model="stabilityai/stable-diffusion-xl-base-1.0")
|
|
12
|
-
>>> paths = provider.save(images)
|
|
13
|
-
>>>
|
|
14
|
-
>>> # Generate multiple images with parameters
|
|
15
|
-
>>> images = provider.generate(
|
|
16
|
-
... prompt="Epic dragon in cyberpunk city",
|
|
17
|
-
... amount=3,
|
|
18
|
-
... model="runwayml/stable-diffusion-v1-5",
|
|
19
|
-
... guidance_scale=7.5,
|
|
20
|
-
... negative_prompt="blurry, bad quality",
|
|
21
|
-
... num_inference_steps=50,
|
|
22
|
-
... width=768,
|
|
23
|
-
... height=768
|
|
24
|
-
... )
|
|
25
|
-
>>> paths = provider.save(images, name="dragon", dir="outputs")
|
|
26
|
-
"""
|
|
27
|
-
|
|
28
|
-
import os
|
|
29
|
-
import requests
|
|
30
|
-
import io
|
|
31
|
-
from PIL import Image
|
|
32
|
-
from typing import Any, List, Optional, Dict
|
|
33
|
-
from webscout.AIbase import ImageProvider
|
|
34
|
-
from webscout.Litlogger import
|
|
35
|
-
from webscout.litagent import LitAgent
|
|
36
|
-
|
|
37
|
-
# Initialize our fire logger and agent 🔥
|
|
38
|
-
logger =
|
|
39
|
-
"HuggingFace",
|
|
40
|
-
format=LogFormat.MODERN_EMOJI,
|
|
41
|
-
|
|
42
|
-
)
|
|
43
|
-
agent = LitAgent()
|
|
44
|
-
|
|
45
|
-
class HFimager(ImageProvider):
|
|
46
|
-
"""Your go-to provider for generating fire images with HuggingFace! 🔥"""
|
|
47
|
-
|
|
48
|
-
def __init__(
|
|
49
|
-
self,
|
|
50
|
-
api_token: str = None,
|
|
51
|
-
timeout: int = 60,
|
|
52
|
-
proxies: dict = {},
|
|
53
|
-
logging: bool = True
|
|
54
|
-
):
|
|
55
|
-
"""Initialize your HuggingFace provider with custom settings! ⚙️
|
|
56
|
-
|
|
57
|
-
Args:
|
|
58
|
-
api_token (str, optional): HuggingFace API token. Uses env var "HUGGINGFACE_API_TOKEN" if None
|
|
59
|
-
timeout (int): Request timeout in seconds (default: 60)
|
|
60
|
-
proxies (dict): Proxy settings for requests (default: {})
|
|
61
|
-
logging (bool): Enable fire logging (default: True)
|
|
62
|
-
"""
|
|
63
|
-
self.base_url = "https://api-inference.huggingface.co/models/"
|
|
64
|
-
self.api_token = api_token or os.environ["HUGGINGFACE_API_TOKEN"]
|
|
65
|
-
self.headers = {
|
|
66
|
-
"Authorization": f"Bearer {self.api_token}",
|
|
67
|
-
"User-Agent": agent.random(),
|
|
68
|
-
"Accept": "application/json"
|
|
69
|
-
}
|
|
70
|
-
self.session = requests.Session()
|
|
71
|
-
self.session.headers.update(self.headers)
|
|
72
|
-
self.session.proxies.update(proxies)
|
|
73
|
-
self.timeout = timeout
|
|
74
|
-
self.prompt: str = "AI-generated image - webscout"
|
|
75
|
-
self.image_extension: str = "jpg"
|
|
76
|
-
self.logging = logging
|
|
77
|
-
if self.logging:
|
|
78
|
-
logger.info("HuggingFace provider initialized! 🚀")
|
|
79
|
-
|
|
80
|
-
def generate(
|
|
81
|
-
self,
|
|
82
|
-
prompt: str,
|
|
83
|
-
amount: int = 1,
|
|
84
|
-
model: str = "stabilityai/stable-diffusion-xl-base-1.0",
|
|
85
|
-
guidance_scale: Optional[float] = None,
|
|
86
|
-
negative_prompt: Optional[str] = None,
|
|
87
|
-
num_inference_steps: Optional[int] = None,
|
|
88
|
-
width: Optional[int] = None,
|
|
89
|
-
height: Optional[int] = None,
|
|
90
|
-
scheduler: Optional[str] = None,
|
|
91
|
-
seed: Optional[int] = None,
|
|
92
|
-
) -> List[bytes]:
|
|
93
|
-
"""Generate some fire images! 🎨
|
|
94
|
-
|
|
95
|
-
Args:
|
|
96
|
-
prompt (str): Your lit image description
|
|
97
|
-
amount (int): How many images to generate (default: 1)
|
|
98
|
-
model (str): Which model to use (default: "stabilityai/stable-diffusion-xl-base-1.0")
|
|
99
|
-
guidance_scale (float, optional): Control how much to follow your prompt
|
|
100
|
-
negative_prompt (str, optional): What you don't want in the image
|
|
101
|
-
num_inference_steps (int, optional): More steps = better quality but slower
|
|
102
|
-
width (int, optional): Image width
|
|
103
|
-
height (int, optional): Image height
|
|
104
|
-
scheduler (str, optional): Which scheduler to use
|
|
105
|
-
seed (int, optional): Random seed for reproducibility
|
|
106
|
-
|
|
107
|
-
Returns:
|
|
108
|
-
List[bytes]: Your generated images as bytes
|
|
109
|
-
"""
|
|
110
|
-
assert bool(prompt), "Yo fam, prompt can't be empty! 🚫"
|
|
111
|
-
assert isinstance(amount, int), f"Amount gotta be an integer, not {type(amount)} 🤔"
|
|
112
|
-
assert amount > 0, "Amount gotta be greater than 0! 📈"
|
|
113
|
-
|
|
114
|
-
self.prompt = prompt
|
|
115
|
-
response = []
|
|
116
|
-
if self.logging:
|
|
117
|
-
logger.info(f"Generating {amount} images with {model}... 🎨")
|
|
118
|
-
|
|
119
|
-
for _ in range(amount):
|
|
120
|
-
url = self.base_url + model
|
|
121
|
-
payload: Dict[str, Any] = {"inputs": prompt}
|
|
122
|
-
parameters = {}
|
|
123
|
-
|
|
124
|
-
if guidance_scale is not None:
|
|
125
|
-
parameters["guidance_scale"] = guidance_scale
|
|
126
|
-
if negative_prompt is not None:
|
|
127
|
-
parameters["negative_prompt"] = negative_prompt
|
|
128
|
-
if num_inference_steps is not None:
|
|
129
|
-
parameters["num_inference_steps"] = num_inference_steps
|
|
130
|
-
if width is not None and height is not None:
|
|
131
|
-
parameters["target_size"] = {"width": width, "height": height}
|
|
132
|
-
if scheduler is not None:
|
|
133
|
-
parameters["scheduler"] = scheduler
|
|
134
|
-
if seed is not None:
|
|
135
|
-
parameters["seed"] = seed
|
|
136
|
-
|
|
137
|
-
if parameters:
|
|
138
|
-
payload["parameters"] = parameters
|
|
139
|
-
|
|
140
|
-
try:
|
|
141
|
-
resp = self.session.post(url, headers=self.headers, json=payload, timeout=self.timeout)
|
|
142
|
-
resp.raise_for_status()
|
|
143
|
-
response.append(resp.content)
|
|
144
|
-
if self.logging:
|
|
145
|
-
logger.success("Image generated successfully! 🎉")
|
|
146
|
-
except requests.RequestException as e:
|
|
147
|
-
if self.logging:
|
|
148
|
-
logger.error(f"Failed to generate image: {e} 😢")
|
|
149
|
-
raise
|
|
150
|
-
|
|
151
|
-
return response
|
|
152
|
-
|
|
153
|
-
def save(
|
|
154
|
-
self,
|
|
155
|
-
response: List[bytes],
|
|
156
|
-
name: str = None,
|
|
157
|
-
dir: str = os.getcwd(),
|
|
158
|
-
filenames_prefix: str = "",
|
|
159
|
-
) -> List[str]:
|
|
160
|
-
"""Save your fire images! 💾
|
|
161
|
-
|
|
162
|
-
Args:
|
|
163
|
-
response (List[bytes]): Your generated images
|
|
164
|
-
name (str, optional): Custom name (default: uses prompt)
|
|
165
|
-
dir (str, optional): Where to save (default: current directory)
|
|
166
|
-
filenames_prefix (str, optional): Add prefix to filenames
|
|
167
|
-
|
|
168
|
-
Returns:
|
|
169
|
-
List[str]: Where your images were saved
|
|
170
|
-
"""
|
|
171
|
-
assert isinstance(response, list), f"Response gotta be a list, not {type(response)} 🤔"
|
|
172
|
-
name = self.prompt if name is None else name
|
|
173
|
-
|
|
174
|
-
filenames = []
|
|
175
|
-
count = 0
|
|
176
|
-
if self.logging:
|
|
177
|
-
logger.info(f"Saving {len(response)} images... 💾")
|
|
178
|
-
|
|
179
|
-
for image_bytes in response:
|
|
180
|
-
def complete_path():
|
|
181
|
-
count_value = "" if count == 0 else f"_{count}"
|
|
182
|
-
return os.path.join(dir, name + count_value + "." + self.image_extension)
|
|
183
|
-
|
|
184
|
-
while os.path.isfile(complete_path()):
|
|
185
|
-
count += 1
|
|
186
|
-
|
|
187
|
-
absolute_path_to_file = complete_path()
|
|
188
|
-
filenames.append(filenames_prefix + os.path.split(absolute_path_to_file)[1])
|
|
189
|
-
|
|
190
|
-
with open(absolute_path_to_file, "wb") as fh:
|
|
191
|
-
fh.write(image_bytes)
|
|
192
|
-
|
|
193
|
-
if self.logging:
|
|
194
|
-
logger.success(f"Images saved successfully! Check {dir} 🎉")
|
|
195
|
-
return filenames
|
|
1
|
+
"""
|
|
2
|
+
HFimager - Your go-to provider for generating fire images with HuggingFace! 🔥
|
|
3
|
+
|
|
4
|
+
Examples:
|
|
5
|
+
>>> from webscout import HFimager
|
|
6
|
+
>>>
|
|
7
|
+
>>> # Initialize with your API key
|
|
8
|
+
>>> provider = HFimager(api_token="your-hf-token")
|
|
9
|
+
>>>
|
|
10
|
+
>>> # Generate a single image
|
|
11
|
+
>>> images = provider.generate("A shiny red sports car", model="stabilityai/stable-diffusion-xl-base-1.0")
|
|
12
|
+
>>> paths = provider.save(images)
|
|
13
|
+
>>>
|
|
14
|
+
>>> # Generate multiple images with parameters
|
|
15
|
+
>>> images = provider.generate(
|
|
16
|
+
... prompt="Epic dragon in cyberpunk city",
|
|
17
|
+
... amount=3,
|
|
18
|
+
... model="runwayml/stable-diffusion-v1-5",
|
|
19
|
+
... guidance_scale=7.5,
|
|
20
|
+
... negative_prompt="blurry, bad quality",
|
|
21
|
+
... num_inference_steps=50,
|
|
22
|
+
... width=768,
|
|
23
|
+
... height=768
|
|
24
|
+
... )
|
|
25
|
+
>>> paths = provider.save(images, name="dragon", dir="outputs")
|
|
26
|
+
"""
|
|
27
|
+
|
|
28
|
+
import os
|
|
29
|
+
import requests
|
|
30
|
+
import io
|
|
31
|
+
from PIL import Image
|
|
32
|
+
from typing import Any, List, Optional, Dict
|
|
33
|
+
from webscout.AIbase import ImageProvider
|
|
34
|
+
from webscout.Litlogger import Logger, LogFormat
|
|
35
|
+
from webscout.litagent import LitAgent
|
|
36
|
+
|
|
37
|
+
# Initialize our fire logger and agent 🔥
# Module-level singletons shared by every HFimager instance in this process.
# NOTE(review): Logger/LogFormat come from webscout.Litlogger and LitAgent from
# webscout.litagent — their exact APIs are defined elsewhere in the package.
logger = Logger(
    "HuggingFace",
    format=LogFormat.MODERN_EMOJI,

)
agent = LitAgent()
|
|
44
|
+
|
|
45
|
+
class HFimager(ImageProvider):
    """Your go-to provider for generating fire images with HuggingFace! 🔥"""

    def __init__(
        self,
        api_token: str = None,
        timeout: int = 60,
        proxies: dict = None,
        logging: bool = True
    ):
        """Initialize your HuggingFace provider with custom settings! ⚙️

        Args:
            api_token (str, optional): HuggingFace API token. Falls back to the
                "HUGGINGFACE_API_TOKEN" env var if None.
            timeout (int): Request timeout in seconds (default: 60)
            proxies (dict, optional): Proxy settings for requests. ``None``
                means no proxies. (A ``None`` default replaces the original
                mutable-default ``{}``, which is shared between calls.)
            logging (bool): Enable fire logging (default: True)

        Raises:
            ValueError: If no API token is supplied and the
                HUGGINGFACE_API_TOKEN environment variable is unset or empty.
        """
        self.base_url = "https://api-inference.huggingface.co/models/"
        # Fail fast with a clear message instead of a bare KeyError from
        # os.environ["HUGGINGFACE_API_TOKEN"].
        self.api_token = api_token or os.environ.get("HUGGINGFACE_API_TOKEN")
        if not self.api_token:
            raise ValueError(
                "No HuggingFace API token: pass api_token or set HUGGINGFACE_API_TOKEN"
            )
        self.headers = {
            "Authorization": f"Bearer {self.api_token}",
            "User-Agent": agent.random(),
            "Accept": "application/json"
        }
        self.session = requests.Session()
        self.session.headers.update(self.headers)
        self.session.proxies.update(proxies or {})
        self.timeout = timeout
        # Default prompt is reused as the default filename stem in save().
        self.prompt: str = "AI-generated image - webscout"
        self.image_extension: str = "jpg"
        self.logging = logging
        if self.logging:
            logger.info("HuggingFace provider initialized! 🚀")

    def generate(
        self,
        prompt: str,
        amount: int = 1,
        model: str = "stabilityai/stable-diffusion-xl-base-1.0",
        guidance_scale: Optional[float] = None,
        negative_prompt: Optional[str] = None,
        num_inference_steps: Optional[int] = None,
        width: Optional[int] = None,
        height: Optional[int] = None,
        scheduler: Optional[str] = None,
        seed: Optional[int] = None,
    ) -> List[bytes]:
        """Generate some fire images! 🎨

        Args:
            prompt (str): Your lit image description
            amount (int): How many images to generate (default: 1)
            model (str): Which model to use (default: "stabilityai/stable-diffusion-xl-base-1.0")
            guidance_scale (float, optional): Control how much to follow your prompt
            negative_prompt (str, optional): What you don't want in the image
            num_inference_steps (int, optional): More steps = better quality but slower
            width (int, optional): Image width (only sent together with height)
            height (int, optional): Image height (only sent together with width)
            scheduler (str, optional): Which scheduler to use
            seed (int, optional): Random seed for reproducibility

        Returns:
            List[bytes]: Your generated images as bytes

        Raises:
            ValueError: If prompt is empty or amount < 1.
            TypeError: If amount is not an int.
            requests.RequestException: If an API request fails.
        """
        # Real exceptions instead of asserts — asserts vanish under `python -O`.
        if not prompt:
            raise ValueError("Yo fam, prompt can't be empty! 🚫")
        if not isinstance(amount, int):
            raise TypeError(f"Amount gotta be an integer, not {type(amount)} 🤔")
        if amount < 1:
            raise ValueError("Amount gotta be greater than 0! 📈")

        self.prompt = prompt
        if self.logging:
            logger.info(f"Generating {amount} images with {model}... 🎨")

        # Build the payload once — it is identical for every request in the
        # batch (the original rebuilt it on each loop iteration).
        url = self.base_url + model
        payload: Dict[str, Any] = {"inputs": prompt}
        parameters: Dict[str, Any] = {}

        if guidance_scale is not None:
            parameters["guidance_scale"] = guidance_scale
        if negative_prompt is not None:
            parameters["negative_prompt"] = negative_prompt
        if num_inference_steps is not None:
            parameters["num_inference_steps"] = num_inference_steps
        if width is not None and height is not None:
            parameters["target_size"] = {"width": width, "height": height}
        if scheduler is not None:
            parameters["scheduler"] = scheduler
        if seed is not None:
            parameters["seed"] = seed

        if parameters:
            payload["parameters"] = parameters

        response: List[bytes] = []
        for _ in range(amount):
            try:
                resp = self.session.post(url, headers=self.headers, json=payload, timeout=self.timeout)
                resp.raise_for_status()
                response.append(resp.content)
                if self.logging:
                    logger.success("Image generated successfully! 🎉")
            except requests.RequestException as e:
                if self.logging:
                    logger.error(f"Failed to generate image: {e} 😢")
                raise

        return response

    def save(
        self,
        response: List[bytes],
        name: str = None,
        dir: str = None,
        filenames_prefix: str = "",
    ) -> List[str]:
        """Save your fire images! 💾

        Args:
            response (List[bytes]): Your generated images
            name (str, optional): Custom name (default: uses prompt)
            dir (str, optional): Where to save (default: current directory).
                Resolved at call time — the original ``os.getcwd()`` default
                was evaluated once at import time, freezing the cwd.
            filenames_prefix (str, optional): Add prefix to filenames

        Returns:
            List[str]: Where your images were saved (prefixed file names)

        Raises:
            TypeError: If response is not a list.
        """
        if not isinstance(response, list):
            raise TypeError(f"Response gotta be a list, not {type(response)} 🤔")
        name = self.prompt if name is None else name
        save_dir = dir if dir is not None else os.getcwd()
        # Support the documented `dir="outputs"` usage even when the target
        # directory does not exist yet.
        os.makedirs(save_dir, exist_ok=True)

        filenames = []
        count = 0
        if self.logging:
            logger.info(f"Saving {len(response)} images... 💾")

        for image_bytes in response:
            def complete_path():
                # First file gets the bare name; later ones get _1, _2, ...
                count_value = "" if count == 0 else f"_{count}"
                return os.path.join(save_dir, name + count_value + "." + self.image_extension)

            # Never overwrite an existing file: bump the suffix until free.
            while os.path.isfile(complete_path()):
                count += 1

            absolute_path_to_file = complete_path()
            filenames.append(filenames_prefix + os.path.split(absolute_path_to_file)[1])

            with open(absolute_path_to_file, "wb") as fh:
                fh.write(image_bytes)

        if self.logging:
            logger.success(f"Images saved successfully! Check {save_dir} 🎉")
        return filenames
|
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
from .sync_ninza import NinjaImager
|
|
2
|
-
from .async_ninza import AsyncNinjaImager
|
|
3
|
-
|
|
4
|
-
__all__ = ["NinjaImager", "AsyncNinjaImager"]
|
|
1
|
+
from .sync_ninza import NinjaImager
|
|
2
|
+
from .async_ninza import AsyncNinjaImager
|
|
3
|
+
|
|
4
|
+
__all__ = ["NinjaImager", "AsyncNinjaImager"]
|