webscout 6.3-py3-none-any.whl → 6.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (85)
  1. webscout/AIauto.py +191 -176
  2. webscout/AIbase.py +0 -197
  3. webscout/AIutel.py +488 -1130
  4. webscout/Bing_search.py +250 -153
  5. webscout/DWEBS.py +151 -19
  6. webscout/Extra/__init__.py +2 -1
  7. webscout/Extra/autocoder/__init__.py +9 -0
  8. webscout/Extra/autocoder/autocoder_utiles.py +121 -0
  9. webscout/Extra/autocoder/rawdog.py +681 -0
  10. webscout/Extra/autollama.py +246 -195
  11. webscout/Extra/gguf.py +441 -416
  12. webscout/LLM.py +206 -43
  13. webscout/Litlogger/__init__.py +681 -0
  14. webscout/Provider/DARKAI.py +1 -1
  15. webscout/Provider/EDITEE.py +1 -1
  16. webscout/Provider/NinjaChat.py +1 -1
  17. webscout/Provider/PI.py +221 -207
  18. webscout/Provider/Perplexity.py +598 -598
  19. webscout/Provider/RoboCoders.py +206 -0
  20. webscout/Provider/TTI/AiForce/__init__.py +22 -0
  21. webscout/Provider/TTI/AiForce/async_aiforce.py +257 -0
  22. webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -0
  23. webscout/Provider/TTI/Nexra/__init__.py +22 -0
  24. webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
  25. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
  26. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
  27. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -0
  28. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -0
  29. webscout/Provider/TTI/__init__.py +2 -4
  30. webscout/Provider/TTI/artbit/__init__.py +22 -0
  31. webscout/Provider/TTI/artbit/async_artbit.py +184 -0
  32. webscout/Provider/TTI/artbit/sync_artbit.py +176 -0
  33. webscout/Provider/TTI/blackbox/__init__.py +4 -0
  34. webscout/Provider/TTI/blackbox/async_blackbox.py +212 -0
  35. webscout/Provider/TTI/{blackboximage.py → blackbox/sync_blackbox.py} +199 -153
  36. webscout/Provider/TTI/deepinfra/__init__.py +4 -0
  37. webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -0
  38. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -0
  39. webscout/Provider/TTI/huggingface/__init__.py +22 -0
  40. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
  41. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
  42. webscout/Provider/TTI/imgninza/__init__.py +4 -0
  43. webscout/Provider/TTI/imgninza/async_ninza.py +214 -0
  44. webscout/Provider/TTI/{imgninza.py → imgninza/sync_ninza.py} +209 -136
  45. webscout/Provider/TTI/talkai/__init__.py +4 -0
  46. webscout/Provider/TTI/talkai/async_talkai.py +229 -0
  47. webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
  48. webscout/Provider/__init__.py +146 -139
  49. webscout/Provider/askmyai.py +2 -2
  50. webscout/Provider/cerebras.py +227 -219
  51. webscout/Provider/llama3mitril.py +0 -1
  52. webscout/Provider/mhystical.py +176 -0
  53. webscout/Provider/perplexitylabs.py +265 -0
  54. webscout/Provider/twitterclone.py +251 -245
  55. webscout/Provider/typegpt.py +359 -0
  56. webscout/__init__.py +28 -23
  57. webscout/__main__.py +5 -5
  58. webscout/cli.py +252 -280
  59. webscout/conversation.py +227 -0
  60. webscout/exceptions.py +161 -29
  61. webscout/litagent/__init__.py +172 -0
  62. webscout/litprinter/__init__.py +831 -0
  63. webscout/optimizers.py +270 -0
  64. webscout/prompt_manager.py +279 -0
  65. webscout/swiftcli/__init__.py +810 -0
  66. webscout/transcriber.py +479 -551
  67. webscout/update_checker.py +125 -0
  68. webscout/version.py +1 -1
  69. {webscout-6.3.dist-info → webscout-6.4.dist-info}/METADATA +26 -45
  70. {webscout-6.3.dist-info → webscout-6.4.dist-info}/RECORD +75 -45
  71. webscout/Provider/TTI/AIuncensoredimage.py +0 -103
  72. webscout/Provider/TTI/Nexra.py +0 -120
  73. webscout/Provider/TTI/PollinationsAI.py +0 -138
  74. webscout/Provider/TTI/WebSimAI.py +0 -142
  75. webscout/Provider/TTI/aiforce.py +0 -160
  76. webscout/Provider/TTI/artbit.py +0 -141
  77. webscout/Provider/TTI/deepinfra.py +0 -148
  78. webscout/Provider/TTI/huggingface.py +0 -155
  79. webscout/Provider/TTI/talkai.py +0 -116
  80. webscout/models.py +0 -23
  81. /webscout/{g4f.py → gpt4free.py} +0 -0
  82. {webscout-6.3.dist-info → webscout-6.4.dist-info}/LICENSE.md +0 -0
  83. {webscout-6.3.dist-info → webscout-6.4.dist-info}/WHEEL +0 -0
  84. {webscout-6.3.dist-info → webscout-6.4.dist-info}/entry_points.txt +0 -0
  85. {webscout-6.3.dist-info → webscout-6.4.dist-info}/top_level.txt +0 -0
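
Much of this release reorganizes the text-to-image (TTI) providers from single-file modules into per-provider packages, each split into a synchronous and an asynchronous implementation (for example, deepinfra.py is removed and replaced by deepinfra/sync_deepinfra.py and deepinfra/async_deepinfra.py). The snippet below is a minimal usage sketch of the new DeepInfra pair, based only on the class signatures added in the hunks that follow; it assumes the new deepinfra/__init__.py re-exports both classes (as the huggingface/__init__.py shown below does), and the prompt text and output directory are placeholders.

# Sketch only: assumes webscout.Provider.TTI.deepinfra re-exports both classes,
# mirroring the huggingface package __init__ included in this diff.
import asyncio

from webscout.Provider.TTI.deepinfra import DeepInfraImager, AsyncDeepInfraImager

def sync_example():
    imager = DeepInfraImager(logging=False)           # requests.Session-based client
    images = imager.generate("A lighthouse at dusk", amount=1)
    return imager.save(images, dir="outputs")         # returns a list of filenames

async def async_example():
    imager = AsyncDeepInfraImager(logging=False)      # aiohttp-based client
    images = await imager.generate("A lighthouse at dusk", amount=1)
    return await imager.save(images, dir="outputs")

if __name__ == "__main__":
    print(sync_example())
    print(asyncio.run(async_example()))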
webscout/Provider/TTI/deepinfra/async_deepinfra.py
@@ -0,0 +1,227 @@
+import aiohttp
+import asyncio
+import os
+import base64
+from typing import List, Union, AsyncGenerator
+from string import punctuation
+from random import choice
+from random import randint
+import aiofiles
+
+from webscout.AIbase import AsyncImageProvider
+from webscout.litagent import LitAgent # Import our fire user agent generator 🔥
+from webscout.Litlogger import LitLogger # For that cyberpunk logging swag ⚡
+
+# Initialize our fire logger 🚀
+logger = LitLogger("AsyncDeepInfraImager")
+
+class AsyncDeepInfraImager(AsyncImageProvider):
+    """
+    Async DeepInfra Image Provider - Your go-to for fire AI art! 🎨
+
+    >>> # Generate some fire art asynchronously! 🔥
+    >>> async def generate_art():
+    ...     imager = AsyncDeepInfraImager(logging=True)
+    ...     images = await imager.generate("Epic dragon breathing fire", amount=2)
+    ...     paths = await imager.save(images)
+    ...     print(paths)
+    >>> asyncio.run(generate_art())
+    ['epic_dragon_0.png', 'epic_dragon_1.png']
+
+    >>> # Turn off logging for stealth mode 🥷
+    >>> async def stealth_art():
+    ...     quiet_imager = AsyncDeepInfraImager(logging=False)
+    ...     images = await quiet_imager.generate("Cyberpunk city at night")
+    ...     paths = await quiet_imager.save(images)
+    >>> asyncio.run(stealth_art())
+    """
+
+    def __init__(
+        self,
+        model: str = "black-forest-labs/FLUX-1-schnell",
+        timeout: int = 60,
+        proxies: dict = {},
+        logging: bool = True
+    ):
+        """Initialize your async DeepInfra provider with custom settings! ⚙️
+
+        Args:
+            model (str): Which model to use (default: black-forest-labs/FLUX-1-schnell)
+            timeout (int): Request timeout in seconds (default: 60)
+            proxies (dict): Proxy settings for requests (default: {})
+            logging (bool): Enable fire logging (default: True)
+        """
+        self.image_gen_endpoint: str = f"https://api.deepinfra.com/v1/inference/{model}"
+        self.headers = {
+            "Accept": "application/json, text/plain, */*",
+            "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "User-Agent": LitAgent().random(), # Using our fire random agent! 🔥
+            "DNT": "1",
+            "Origin": "https://deepinfra.com",
+            "Referer": "https://deepinfra.com/",
+            "Sec-CH-UA": '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": '"Windows"',
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-site"
+        }
+        self.timeout = timeout
+        self.proxies = proxies
+        self.prompt: str = "AI-generated image - webscout"
+        self.image_extension: str = "png"
+        self.logging = logging
+        if self.logging:
+            logger.info("AsyncDeepInfraImager initialized! Ready to create some fire art! 🚀")
+
+    async def generate(
+        self, prompt: str, amount: int = 1, additives: bool = True,
+        num_inference_steps: int = 25, guidance_scale: float = 7.5,
+        width: int = 1024, height: int = 1024, seed: int = None,
+        max_retries: int = 3, retry_delay: int = 5
+    ) -> List[bytes]:
+        """Generate some fire images from your prompt! 🎨
+
+        Args:
+            prompt (str): Your creative prompt
+            amount (int): How many images to generate
+            additives (bool): Add random characters to make prompts unique
+            num_inference_steps (int): Number of inference steps
+            guidance_scale (float): Guidance scale for generation
+            width (int): Image width
+            height (int): Image height
+            seed (int, optional): Random seed for reproducibility
+            max_retries (int): Max retry attempts if generation fails
+            retry_delay (int): Seconds to wait between retries
+
+        Returns:
+            List[bytes]: Your generated images as bytes
+        """
+        assert bool(prompt), "Prompt cannot be null"
+        assert isinstance(amount, int), f"Amount should be an integer only not {type(amount)}"
+        assert amount > 0, "Amount should be greater than 0"
+
+        ads = lambda: (
+            ""
+            if not additives
+            else choice(punctuation)
+            + choice(punctuation)
+            + choice(punctuation)
+            + choice(punctuation)
+            + choice(punctuation)
+        )
+
+        self.prompt = prompt
+        response = []
+
+        if self.logging:
+            logger.info(f"Generating {amount} images... 🎨")
+
+        async with aiohttp.ClientSession(headers=self.headers) as session:
+            for _ in range(amount):
+                payload = {
+                    "prompt": prompt + ads(),
+                    "num_inference_steps": num_inference_steps,
+                    "guidance_scale": guidance_scale,
+                    "width": width,
+                    "height": height,
+                    "seed": seed if seed is not None else randint(1, 10000),
+                }
+
+                for attempt in range(max_retries):
+                    try:
+                        async with session.post(
+                            self.image_gen_endpoint,
+                            json=payload,
+                            timeout=self.timeout,
+                            proxy=self.proxies.get('http') if self.proxies else None
+                        ) as resp:
+                            resp.raise_for_status()
+                            data = await resp.json()
+                            # Extract base64 encoded image data and decode it
+                            image_data = data['images'][0].split(",")[1]
+                            image_bytes = base64.b64decode(image_data)
+                            response.append(image_bytes)
+                            if self.logging:
+                                logger.success(f"Generated image {len(response)}/{amount}! 🎨")
+                            break
+                    except aiohttp.ClientError as e:
+                        if attempt == max_retries - 1:
+                            if self.logging:
+                                logger.error(f"Failed to generate image after {max_retries} attempts: {e} 😢")
+                            raise
+                        else:
+                            if self.logging:
+                                logger.warning(f"Attempt {attempt + 1} failed. Retrying in {retry_delay} seconds... 🔄")
+                            await asyncio.sleep(retry_delay)
+
+        if self.logging:
+            logger.success("All images generated successfully! 🎉")
+        return response
+
+    async def save(
+        self,
+        response: Union[List[bytes], AsyncGenerator[bytes, None]],
+        name: str = None,
+        dir: str = os.getcwd(),
+        filenames_prefix: str = "",
+    ) -> List[str]:
+        """Save your fire images! 💾
+
+        Args:
+            response (Union[List[bytes], AsyncGenerator[bytes, None]]): Image data
+            name (str, optional): Base name for saved files
+            dir (str, optional): Where to save the images
+            filenames_prefix (str, optional): Prefix for filenames
+
+        Returns:
+            List[str]: List of saved filenames
+        """
+        if not os.path.exists(dir):
+            os.makedirs(dir)
+            if self.logging:
+                logger.info(f"Created directory: {dir} 📁")
+
+        name = self.prompt if name is None else name
+        saved_paths = []
+
+        async def save_single_image(image_bytes: bytes, index: int) -> str:
+            filename = f"{filenames_prefix}{name}_{index}.{self.image_extension}"
+            filepath = os.path.join(dir, filename)
+
+            async with aiofiles.open(filepath, "wb") as f:
+                await f.write(image_bytes)
+
+            if self.logging:
+                logger.success(f"Saved image to: {filepath} 💾")
+            return filename
+
+        if isinstance(response, list):
+            image_list = response
+        else:
+            image_list = [chunk async for chunk in response]
+
+        if self.logging:
+            logger.info(f"Saving {len(image_list)} images... 💾")
+
+        tasks = [save_single_image(img, i) for i, img in enumerate(image_list)]
+        saved_paths = await asyncio.gather(*tasks)
+
+        if self.logging:
+            logger.success(f"All images saved successfully! Check {dir} 🎉")
+        return saved_paths
+
+
+if __name__ == "__main__":
+    async def main():
+        bot = AsyncDeepInfraImager()
+        try:
+            resp = await bot.generate("A shiny red sports car speeding down a scenic mountain road", 1)
+            paths = await bot.save(resp)
+            print(paths)
+        except Exception as e:
+            if bot.logging:
+                logger.error(f"An error occurred: {e} 😢")
+
+    asyncio.run(main())
webscout/Provider/TTI/deepinfra/sync_deepinfra.py
@@ -0,0 +1,199 @@
+import requests
+import os
+from typing import List
+from string import punctuation
+from random import choice
+from random import randint
+import base64
+
+from webscout.AIbase import ImageProvider
+from webscout.litagent import LitAgent # Import our fire user agent generator 🔥
+from webscout.Litlogger import LitLogger # For that cyberpunk logging swag ⚡
+
+# Initialize our fire logger 🚀
+logger = LitLogger("DeepInfraImager")
+
+class DeepInfraImager(ImageProvider):
+    """
+    DeepInfra Image Provider - Your go-to for fire AI art! 🎨
+
+    >>> # Generate some fire art! 🔥
+    >>> imager = DeepInfraImager(logging=True)
+    >>> images = imager.generate("Epic dragon breathing fire", amount=2)
+    >>> paths = imager.save(images)
+    >>> print(paths)
+    ['epic_dragon_0.png', 'epic_dragon_1.png']
+
+    >>> # Turn off logging for stealth mode 🥷
+    >>> quiet_imager = DeepInfraImager(logging=False)
+    >>> images = quiet_imager.generate("Cyberpunk city at night")
+    >>> paths = quiet_imager.save(images)
+    """
+
+    def __init__(
+        self,
+        model: str = "black-forest-labs/FLUX-1-schnell",
+        timeout: int = 60,
+        proxies: dict = {},
+        logging: bool = True
+    ):
+        """Initialize your DeepInfra provider with custom settings! ⚙️
+
+        Args:
+            model (str): Which model to use (default: black-forest-labs/FLUX-1-schnell)
+            timeout (int): Request timeout in seconds (default: 60)
+            proxies (dict): Proxy settings for requests (default: {})
+            logging (bool): Enable fire logging (default: True)
+        """
+        self.image_gen_endpoint: str = f"https://api.deepinfra.com/v1/inference/{model}"
+        self.headers = {
+            "Accept": "application/json, text/plain, */*",
+            "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "User-Agent": LitAgent().random(), # Using our fire random agent! 🔥
+            "DNT": "1",
+            "Origin": "https://deepinfra.com",
+            "Referer": "https://deepinfra.com/",
+            "Sec-CH-UA": '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": '"Windows"',
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-site"
+        }
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+        self.timeout = timeout
+        self.prompt: str = "AI-generated image - webscout"
+        self.image_extension: str = "png"
+        self.logging = logging
+        if self.logging:
+            logger.info("DeepInfraImager initialized! Ready to create some fire art! 🚀")
+
+    def generate(
+        self, prompt: str, amount: int = 1, additives: bool = True,
+        num_inference_steps: int = 25, guidance_scale: float = 7.5,
+        width: int = 1024, height: int = 1024, seed: int = None
+    ) -> List[bytes]:
+        """Generate some fire images from your prompt! 🎨
+
+        Args:
+            prompt (str): Your creative prompt
+            amount (int): How many images to generate
+            additives (bool): Add random characters to make prompts unique
+            num_inference_steps (int): Number of inference steps
+            guidance_scale (float): Guidance scale for generation
+            width (int): Image width
+            height (int): Image height
+            seed (int, optional): Random seed for reproducibility
+
+        Returns:
+            List[bytes]: Your generated images as bytes
+        """
+        assert bool(prompt), "Prompt cannot be null"
+        assert isinstance(amount, int), f"Amount should be an integer only not {type(amount)}"
+        assert amount > 0, "Amount should be greater than 0"
+
+        ads = lambda: (
+            ""
+            if not additives
+            else choice(punctuation)
+            + choice(punctuation)
+            + choice(punctuation)
+            + choice(punctuation)
+            + choice(punctuation)
+        )
+
+        if self.logging:
+            logger.info(f"Generating {amount} images... 🎨")
+
+        self.prompt = prompt
+        response = []
+        for _ in range(amount):
+            payload = {
+                "prompt": prompt + ads(),
+                "num_inference_steps": num_inference_steps,
+                "guidance_scale": guidance_scale,
+                "width": width,
+                "height": height,
+                "seed": seed if seed is not None else randint(1, 10000),
+            }
+            try:
+                resp = self.session.post(url=self.image_gen_endpoint, json=payload, timeout=self.timeout)
+                resp.raise_for_status()
+                # Extract base64 encoded image data and decode it
+                image_data = resp.json()['images'][0].split(",")[1]
+                image_bytes = base64.b64decode(image_data)
+                response.append(image_bytes)
+                if self.logging:
+                    logger.success(f"Generated image {len(response)}/{amount}! 🎨")
+            except Exception as e:
+                if self.logging:
+                    logger.error(f"Failed to generate image: {e} 😢")
+                raise
+
+        if self.logging:
+            logger.success("All images generated successfully! 🎉")
+        return response
+
+    def save(
+        self,
+        response: List[bytes],
+        name: str = None,
+        dir: str = os.getcwd(),
+        filenames_prefix: str = "",
+    ) -> List[str]:
+        """Save your fire images! 💾
+
+        Args:
+            response (List[bytes]): List of image data
+            name (str, optional): Base name for saved files
+            dir (str, optional): Where to save the images
+            filenames_prefix (str, optional): Prefix for filenames
+
+        Returns:
+            List[str]: List of saved filenames
+        """
+        assert isinstance(response, list), f"Response should be of {list} not {type(response)}"
+        name = self.prompt if name is None else name
+
+        if not os.path.exists(dir):
+            os.makedirs(dir)
+            if self.logging:
+                logger.info(f"Created directory: {dir} 📁")
+
+        if self.logging:
+            logger.info(f"Saving {len(response)} images... 💾")
+
+        filenames = []
+        count = 0
+        for image in response:
+            def complete_path():
+                count_value = "" if count == 0 else f"_{count}"
+                return os.path.join(dir, name + count_value + "." + self.image_extension)
+
+            while os.path.isfile(complete_path()):
+                count += 1
+
+            absolute_path_to_file = complete_path()
+            filenames.append(filenames_prefix + os.path.split(absolute_path_to_file)[1])
+
+            with open(absolute_path_to_file, "wb") as fh:
+                fh.write(image)
+            if self.logging:
+                logger.success(f"Saved image to: {absolute_path_to_file} 💾")
+
+        if self.logging:
+            logger.success(f"All images saved successfully! Check {dir} 🎉")
+        return filenames
+
+
+if __name__ == "__main__":
+    bot = DeepInfraImager()
+    try:
+        resp = bot.generate("A shiny red sports car speeding down a scenic mountain road", 1)
+        print(bot.save(resp))
+    except Exception as e:
+        if bot.logging:
+            logger.error(f"An error occurred: {e} 😢")
webscout/Provider/TTI/huggingface/__init__.py
@@ -0,0 +1,22 @@
+"""
+HuggingFace Providers - Your go-to solution for generating fire images! 🔥
+
+Examples:
+    >>> # Sync Usage
+    >>> from webscout import HFimager
+    >>> provider = HFimager(api_token="your-hf-token")
+    >>> images = provider.generate("Cool art")
+    >>> paths = provider.save(images)
+    >>>
+    >>> # Async Usage
+    >>> from webscout import AsyncHFimager
+    >>> async def example():
+    ...     provider = AsyncHFimager(api_token="your-hf-token")
+    ...     images = await provider.generate("Epic dragon")
+    ...     paths = await provider.save(images)
+"""
+
+from .sync_huggingface import HFimager
+from .async_huggingface import AsyncHFimager
+
+__all__ = ["HFimager", "AsyncHFimager"]
webscout/Provider/TTI/huggingface/async_huggingface.py
@@ -0,0 +1,199 @@
+"""
+AsyncHFimager - Your go-to async provider for generating fire images with HuggingFace! ⚡
+
+Examples:
+    >>> from webscout import AsyncHFimager
+    >>> import asyncio
+    >>>
+    >>> async def example():
+    ...     # Initialize with your API key
+    ...     provider = AsyncHFimager(api_token="your-hf-token")
+    ...
+    ...     # Generate a single image
+    ...     images = await provider.generate("A shiny red sports car")
+    ...     paths = await provider.save(images)
+    ...
+    ...     # Generate multiple images with parameters
+    ...     images = await provider.generate(
+    ...         prompt="Epic dragon in cyberpunk city",
+    ...         amount=3,
+    ...         model="runwayml/stable-diffusion-v1-5",
+    ...         guidance_scale=7.5,
+    ...         negative_prompt="blurry, bad quality",
+    ...         num_inference_steps=50,
+    ...         width=768,
+    ...         height=768
+    ...     )
+    ...     paths = await provider.save(images, name="dragon", dir="outputs")
+    >>>
+    >>> # Run the example
+    >>> asyncio.run(example())
+"""
+
+import os
+import aiohttp
+import aiofiles
+import asyncio
+from typing import Any, List, Optional, Dict
+from webscout.AIbase import AsyncImageProvider
+from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
+from webscout.litagent import LitAgent
+
+# Initialize our fire logger and agent 🔥
+logger = LitLogger(
+    "AsyncHuggingFace",
+    format=LogFormat.MODERN_EMOJI,
+    color_scheme=ColorScheme.CYBERPUNK
+)
+agent = LitAgent()
+
+class AsyncHFimager(AsyncImageProvider):
+    """Your go-to async provider for generating fire images with HuggingFace! ⚡"""
+
+    def __init__(
+        self,
+        api_token: str = None,
+        timeout: int = 60,
+        proxies: dict = {},
+        logging: bool = True
+    ):
+        """Initialize your async HuggingFace provider with custom settings! ⚙️
+
+        Args:
+            api_token (str, optional): HuggingFace API token. Uses env var "HUGGINGFACE_API_TOKEN" if None
+            timeout (int): Request timeout in seconds (default: 60)
+            proxies (dict): Proxy settings for requests (default: {})
+            logging (bool): Enable fire logging (default: True)
+        """
+        self.base_url = "https://api-inference.huggingface.co/models/"
+        self.api_token = api_token or os.environ["HUGGINGFACE_API_TOKEN"]
+        self.headers = {
+            "Authorization": f"Bearer {self.api_token}",
+            "User-Agent": agent.random(),
+            "Accept": "application/json"
+        }
+        self.timeout = timeout
+        self.proxies = proxies
+        self.prompt: str = "AI-generated image - webscout"
+        self.image_extension: str = "jpg"
+        self.logging = logging
+        if self.logging:
+            logger.info("AsyncHuggingFace provider initialized! 🚀")
+
+    async def generate(
+        self,
+        prompt: str,
+        amount: int = 1,
+        model: str = "stabilityai/stable-diffusion-xl-base-1.0",
+        guidance_scale: Optional[float] = None,
+        negative_prompt: Optional[str] = None,
+        num_inference_steps: Optional[int] = None,
+        width: Optional[int] = None,
+        height: Optional[int] = None,
+        scheduler: Optional[str] = None,
+        seed: Optional[int] = None,
+    ) -> List[bytes]:
+        """Generate some fire images asynchronously! ⚡
+
+        Args:
+            prompt (str): Your lit image description
+            amount (int): How many images to generate (default: 1)
+            model (str): Which model to use (default: "stabilityai/stable-diffusion-xl-base-1.0")
+            guidance_scale (float, optional): Control how much to follow your prompt
+            negative_prompt (str, optional): What you don't want in the image
+            num_inference_steps (int, optional): More steps = better quality but slower
+            width (int, optional): Image width
+            height (int, optional): Image height
+            scheduler (str, optional): Which scheduler to use
+            seed (int, optional): Random seed for reproducibility
+
+        Returns:
+            List[bytes]: Your generated images as bytes
+        """
+        assert bool(prompt), "Yo fam, prompt can't be empty! 🚫"
+        assert isinstance(amount, int), f"Amount gotta be an integer, not {type(amount)} 🤔"
+        assert amount > 0, "Amount gotta be greater than 0! 📈"
+
+        self.prompt = prompt
+        response = []
+        if self.logging:
+            logger.info(f"Generating {amount} images with {model}... 🎨")
+
+        async with aiohttp.ClientSession(headers=self.headers) as session:
+            for _ in range(amount):
+                url = self.base_url + model
+                payload: Dict[str, Any] = {"inputs": prompt}
+                parameters = {}
+
+                if guidance_scale is not None:
+                    parameters["guidance_scale"] = guidance_scale
+                if negative_prompt is not None:
+                    parameters["negative_prompt"] = negative_prompt
+                if num_inference_steps is not None:
+                    parameters["num_inference_steps"] = num_inference_steps
+                if width is not None and height is not None:
+                    parameters["target_size"] = {"width": width, "height": height}
+                if scheduler is not None:
+                    parameters["scheduler"] = scheduler
+                if seed is not None:
+                    parameters["seed"] = seed
+
+                if parameters:
+                    payload["parameters"] = parameters
+
+                try:
+                    async with session.post(url, json=payload, timeout=self.timeout) as resp:
+                        resp.raise_for_status()
+                        response.append(await resp.read())
+                        if self.logging:
+                            logger.success("Image generated successfully! 🎉")
+                except aiohttp.ClientError as e:
+                    if self.logging:
+                        logger.error(f"Failed to generate image: {e} 😢")
+                    raise
+
+        return response
+
+    async def save(
+        self,
+        response: List[bytes],
+        name: str = None,
+        dir: str = os.getcwd(),
+        filenames_prefix: str = "",
+    ) -> List[str]:
+        """Save your fire images asynchronously! 💾
+
+        Args:
+            response (List[bytes]): Your generated images
+            name (str, optional): Custom name (default: uses prompt)
+            dir (str, optional): Where to save (default: current directory)
+            filenames_prefix (str, optional): Add prefix to filenames
+
+        Returns:
+            List[str]: Where your images were saved
+        """
+        assert isinstance(response, list), f"Response gotta be a list, not {type(response)} 🤔"
+        name = self.prompt if name is None else name
+
+        filenames = []
+        count = 0
+        if self.logging:
+            logger.info(f"Saving {len(response)} images... 💾")
+
+        for image_bytes in response:
+            def complete_path():
+                count_value = "" if count == 0 else f"_{count}"
+                return os.path.join(dir, name + count_value + "." + self.image_extension)
+
+            while os.path.isfile(complete_path()):
+                count += 1
+
+            absolute_path_to_file = complete_path()
+            filenames.append(filenames_prefix + os.path.split(absolute_path_to_file)[1])
+
+            async with aiofiles.open(absolute_path_to_file, "wb") as fh:
+                await fh.write(image_bytes)
+
+        if self.logging:
+            logger.success(f"Images saved successfully! Check {dir} 🎉")
+        return filenames