webscout-7.6-py3-none-any.whl → webscout-7.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (36)
  1. webscout/Extra/autocoder/__init__.py +9 -9
  2. webscout/Extra/autocoder/autocoder_utiles.py +193 -195
  3. webscout/Extra/autocoder/rawdog.py +789 -649
  4. webscout/Extra/gguf.py +54 -24
  5. webscout/Provider/AISEARCH/ISou.py +0 -21
  6. webscout/Provider/AllenAI.py +4 -21
  7. webscout/Provider/ChatGPTClone.py +226 -0
  8. webscout/Provider/Glider.py +8 -4
  9. webscout/Provider/Hunyuan.py +272 -0
  10. webscout/Provider/LambdaChat.py +391 -0
  11. webscout/Provider/OLLAMA.py +256 -32
  12. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +18 -45
  13. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +34 -46
  14. webscout/Provider/TTI/artbit/async_artbit.py +3 -32
  15. webscout/Provider/TTI/artbit/sync_artbit.py +3 -31
  16. webscout/Provider/TTI/fastflux/async_fastflux.py +6 -2
  17. webscout/Provider/TTI/fastflux/sync_fastflux.py +7 -2
  18. webscout/Provider/TTI/piclumen/__init__.py +22 -22
  19. webscout/Provider/TTI/piclumen/sync_piclumen.py +232 -232
  20. webscout/Provider/WebSim.py +227 -0
  21. webscout/Provider/__init__.py +12 -1
  22. webscout/Provider/flowith.py +13 -2
  23. webscout/Provider/labyrinth.py +239 -0
  24. webscout/Provider/learnfastai.py +28 -15
  25. webscout/Provider/sonus.py +208 -0
  26. webscout/Provider/typegpt.py +1 -1
  27. webscout/Provider/uncovr.py +297 -0
  28. webscout/cli.py +49 -0
  29. webscout/litagent/agent.py +14 -9
  30. webscout/version.py +1 -1
  31. {webscout-7.6.dist-info → webscout-7.7.dist-info}/METADATA +33 -22
  32. {webscout-7.6.dist-info → webscout-7.7.dist-info}/RECORD +36 -29
  33. {webscout-7.6.dist-info → webscout-7.7.dist-info}/LICENSE.md +0 -0
  34. {webscout-7.6.dist-info → webscout-7.7.dist-info}/WHEEL +0 -0
  35. {webscout-7.6.dist-info → webscout-7.7.dist-info}/entry_points.txt +0 -0
  36. {webscout-7.6.dist-info → webscout-7.7.dist-info}/top_level.txt +0 -0
webscout/Provider/OLLAMA.py

@@ -3,8 +3,12 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
-from typing import Any, AsyncGenerator, Dict
+from typing import Any, AsyncGenerator, Dict, List, Optional, Union
 import ollama
+from ollama import AsyncClient, Client, ResponseError
+import asyncio
+import base64
+from pathlib import Path
 
 class OLLAMA(Provider):
     def __init__(
@@ -19,7 +23,9 @@ class OLLAMA(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        system_prompt: str = "You are a helpful and friendly AI assistant.",
+        system_prompt: str = "You are a helpful and friendly AI assistant.",
+        host: str = 'http://localhost:11434',
+        headers: Optional[Dict] = None,
     ):
         """Instantiates Ollama
 
@@ -34,7 +40,9 @@ class OLLAMA(Provider):
             proxies (dict, optional): Http request proxies. Defaults to {}.
             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            system_prompt (str, optional): System prompt for Ollama. Defaults to "You are a helpful and friendly AI assistant.".
+            system_prompt (str, optional): System prompt for Ollama. Defaults to "You are a helpful and friendly AI assistant.".
+            host (str, optional): Ollama host URL. Defaults to 'http://localhost:11434'.
+            headers (dict, optional): Custom headers for requests. Defaults to None.
         """
         self.model = model
         self.is_conversation = is_conversation
@@ -42,6 +50,8 @@ class OLLAMA(Provider):
         self.timeout = timeout
         self.last_response = {}
         self.system_prompt = system_prompt
+        self.client = Client(host=host, headers=headers)
+        self.async_client = AsyncClient(host=host, headers=headers)
 
         self.__available_optimizers = (
             method
@@ -67,6 +77,8 @@ class OLLAMA(Provider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
+        tools: Optional[List[Dict]] = None,
+        images: Optional[List[str]] = None,
     ) -> dict | AsyncGenerator:
         """Chat with AI
 
@@ -76,13 +88,10 @@ class OLLAMA(Provider):
            raw (bool, optional): Stream back raw response as received. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+           tools (List[Dict], optional): List of tools/functions to use. Defaults to None.
+           images (List[str], optional): List of image paths or base64 encoded images. Defaults to None.
         Returns:
            dict|AsyncGenerator : ai content
-        ```json
-        {
-            "text" : "print('How may I help you today?')"
-        }
-        ```
         """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -95,29 +104,101 @@ class OLLAMA(Provider):
                     f"Optimizer is not one of {self.__available_optimizers}"
                 )
 
-        def for_stream():
-            # Correctly call ollama.chat with stream=True
-            stream = ollama.chat(model=self.model, messages=[
-                {'role': 'system', 'content': self.system_prompt},
-                {'role': 'user', 'content': conversation_prompt}
-            ], stream=True)
+        messages = [
+            {'role': 'system', 'content': self.system_prompt},
+            {'role': 'user', 'content': conversation_prompt}
+        ]
 
-            # Yield each chunk directly
-            for chunk in stream:
-                yield chunk['message']['content'] if raw else dict(text=chunk['message']['content'])
+        if images:
+            messages[-1]['images'] = images
 
-        def for_non_stream():
-            response = ollama.chat(model=self.model, messages=[
-                {'role': 'system', 'content': self.system_prompt},  # Add system message
-                {'role': 'user', 'content': conversation_prompt}
-            ])
-            self.last_response.update(dict(text=response['message']['content']))
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
-            return self.last_response
+        try:
+            def for_stream():
+                stream = self.client.chat(
+                    model=self.model,
+                    messages=messages,
+                    stream=True,
+                    tools=tools
+                )
+                for chunk in stream:
+                    yield chunk['message']['content'] if raw else dict(text=chunk['message']['content'])
 
-        return for_stream() if stream else for_non_stream()
+            def for_non_stream():
+                response = self.client.chat(
+                    model=self.model,
+                    messages=messages,
+                    tools=tools
+                )
+                self.last_response.update(dict(text=response['message']['content']))
+                self.conversation.update_chat_history(
+                    prompt, self.get_message(self.last_response)
+                )
+                return self.last_response
+
+            return for_stream() if stream else for_non_stream()
+        except ResponseError as e:
+            if e.status_code == 404:
+                raise Exception(f"Model {self.model} not found. Please pull it first using `ollama pull {self.model}`")
+            raise e
+
+    async def aask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        tools: Optional[List[Dict]] = None,
+        images: Optional[List[str]] = None,
+    ) -> dict | AsyncGenerator:
+        """Async version of ask method"""
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        messages = [
+            {'role': 'system', 'content': self.system_prompt},
+            {'role': 'user', 'content': conversation_prompt}
+        ]
+
+        if images:
+            messages[-1]['images'] = images
+
+        try:
+            async def for_stream():
+                stream = await self.async_client.chat(
+                    model=self.model,
+                    messages=messages,
+                    stream=True,
+                    tools=tools
+                )
+                async for chunk in stream:
+                    yield chunk['message']['content'] if raw else dict(text=chunk['message']['content'])
+
+            async def for_non_stream():
+                response = await self.async_client.chat(
+                    model=self.model,
+                    messages=messages,
+                    tools=tools
+                )
+                self.last_response.update(dict(text=response['message']['content']))
+                self.conversation.update_chat_history(
+                    prompt, self.get_message(self.last_response)
+                )
+                return self.last_response
+
+            return for_stream() if stream else for_non_stream()
+        except ResponseError as e:
+            if e.status_code == 404:
+                raise Exception(f"Model {self.model} not found. Please pull it first using `ollama pull {self.model}`")
+            raise e
 
     def chat(
         self,
@@ -125,6 +206,8 @@ class OLLAMA(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
+        tools: Optional[List[Dict]] = None,
+        images: Optional[List[str]] = None,
    ) -> str | AsyncGenerator:
        """Generate response `str`
        Args:
@@ -132,13 +215,15 @@ class OLLAMA(Provider):
            stream (bool, optional): Flag for streaming response. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+           tools (List[Dict], optional): List of tools/functions to use. Defaults to None.
+           images (List[str], optional): List of image paths or base64 encoded images. Defaults to None.
        Returns:
            str: Response generated
        """
-
        def for_stream():
            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
+                prompt, True, optimizer=optimizer, conversationally=conversationally,
+                tools=tools, images=images
            ):
                yield self.get_message(response)
 
@@ -149,6 +234,39 @@
                    False,
                    optimizer=optimizer,
                    conversationally=conversationally,
+                    tools=tools,
+                    images=images
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    async def achat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        tools: Optional[List[Dict]] = None,
+        images: Optional[List[str]] = None,
+    ) -> str | AsyncGenerator:
+        """Async version of chat method"""
+        async def for_stream():
+            async for response in await self.aask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally,
+                tools=tools, images=images
+            ):
+                yield self.get_message(response)
+
+        async def for_non_stream():
+            return self.get_message(
+                await self.aask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                    tools=tools,
+                    images=images
                )
            )
 
@@ -165,8 +283,114 @@
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]
+
+    def generate(
+        self,
+        prompt: str,
+        stream: bool = False,
+        **kwargs
+    ) -> dict | AsyncGenerator:
+        """Generate text using the model"""
+        try:
+            if stream:
+                return self.client.generate(model=self.model, prompt=prompt, stream=True, **kwargs)
+            return self.client.generate(model=self.model, prompt=prompt, **kwargs)
+        except ResponseError as e:
+            if e.status_code == 404:
+                raise Exception(f"Model {self.model} not found. Please pull it first using `ollama pull {self.model}`")
+            raise e
+
+    async def agenerate(
+        self,
+        prompt: str,
+        stream: bool = False,
+        **kwargs
+    ) -> dict | AsyncGenerator:
+        """Async version of generate method"""
+        try:
+            if stream:
+                return await self.async_client.generate(model=self.model, prompt=prompt, stream=True, **kwargs)
+            return await self.async_client.generate(model=self.model, prompt=prompt, **kwargs)
+        except ResponseError as e:
+            if e.status_code == 404:
+                raise Exception(f"Model {self.model} not found. Please pull it first using `ollama pull {self.model}`")
+            raise e
+
+    def list_models(self) -> List[dict]:
+        """List all available models"""
+        return self.client.list()
+
+    def show_model(self, model: str = None) -> dict:
+        """Show model details"""
+        model = model or self.model
+        return self.client.show(model)
+
+    def pull_model(self, model: str = None) -> None:
+        """Pull a model from Ollama"""
+        model = model or self.model
+        self.client.pull(model)
+
+    def delete_model(self, model: str = None) -> None:
+        """Delete a model"""
+        model = model or self.model
+        self.client.delete(model)
+
+    def embed(
+        self,
+        input: Union[str, List[str]],
+        model: str = None
+    ) -> List[float]:
+        """Generate embeddings for input text"""
+        model = model or self.model
+        return self.client.embed(model=model, input=input)
+
+    async def aembed(
+        self,
+        input: Union[str, List[str]],
+        model: str = None
+    ) -> List[float]:
+        """Async version of embed method"""
+        model = model or self.model
+        return await self.async_client.embed(model=model, input=input)
+
 if __name__ == "__main__":
-    ai = OLLAMA(model="llama3.2:1b")
+    # Example usage
+    ai = OLLAMA(model="qwen2.5:0.5b")
+    # ai.pull_model("qwen2.5:0.5b")
+    # Basic chat
     response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)
+
+    # Vision example
+    # response = ai.chat(
+    #     "What's in this image?",
+    #     images=["path/to/image.jpg"]
+    # )
+    # print(response)
+
+    # Tools example
+    def add_numbers(a: int, b: int) -> int:
+        return a + b
+
+    tools = [{
+        'type': 'function',
+        'function': {
+            'name': 'add_numbers',
+            'description': 'Add two numbers',
+            'parameters': {
+                'type': 'object',
+                'properties': {
+                    'a': {'type': 'integer'},
+                    'b': {'type': 'integer'}
+                },
+                'required': ['a', 'b']
+            }
+        }
+    }]
+
+    response = ai.chat(
+        "What is 5 plus 3?",
+        tools=tools
+    )
+    print(response)
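
The __main__ demo above only exercises the synchronous path. Below is a minimal sketch of the new async surface (achat, aembed) added in this release; it assumes a local Ollama server on the default port, an already-pulled qwen2.5:0.5b model, and that the class is importable from webscout.Provider as the updated __init__.py suggests.

import asyncio

from webscout.Provider import OLLAMA  # import path assumed from the package layout

async def main():
    # Assumes `ollama serve` is running locally and the model has been
    # pulled, e.g. via ai.pull_model("qwen2.5:0.5b").
    ai = OLLAMA(model="qwen2.5:0.5b", host="http://localhost:11434")

    # achat(stream=True) resolves to an async generator (aask's for_stream),
    # so await the call first, then iterate over it.
    async for chunk in await ai.achat("Explain Ollama in one line.", stream=True):
        print(chunk, end="", flush=True)

    # The new embedding helper delegates to ollama.AsyncClient.embed.
    vector = await ai.aembed("hello world")
    print(vector)

asyncio.run(main())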
webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py

@@ -7,11 +7,7 @@ from random import choice
 import aiofiles
 
 from webscout.AIbase import AsyncImageProvider
-from webscout.litagent import LitAgent  # Import our fire user agent generator 🔥
-from webscout.Litlogger import Logger  # For that cyberpunk logging swag ⚡
-
-# Initialize our fire logger 🚀
-logger = Logger("AsyncFreeAIPlayground")
+from webscout.litagent import LitAgent
 
 class AsyncFreeAIImager(AsyncImageProvider):
     """
@@ -30,10 +26,9 @@ class AsyncFreeAIImager(AsyncImageProvider):
 
     def __init__(
         self,
-        model: str = "dall-e-3",  # Updated default model
+        model: str = "dall-e-3",
         timeout: int = 60,
-        proxies: dict = {},
-        logging: bool = True
+        proxies: dict = {}
     ):
         """Initialize your async FreeAIPlayground provider with custom settings! ⚙️
 
@@ -41,14 +36,13 @@ class AsyncFreeAIImager(AsyncImageProvider):
            model (str): Which model to use (default: dall-e-3)
            timeout (int): Request timeout in seconds (default: 60)
            proxies (dict): Proxy settings for requests (default: {})
-           logging (bool): Enable fire logging (default: True)
        """
        self.image_gen_endpoint: str = "https://api.freeaichatplayground.com/v1/images/generations"
        self.headers = {
            "Accept": "application/json",
            "Accept-Language": "en-US,en;q=0.9",
            "Content-Type": "application/json",
-            "User-Agent": LitAgent().random(),  # Using our fire random agent! 🔥
+            "User-Agent": LitAgent().random(),
            "Origin": "https://freeaichatplayground.com",
            "Referer": "https://freeaichatplayground.com/",
        }
@@ -57,9 +51,6 @@ class AsyncFreeAIImager(AsyncImageProvider):
         self.proxies = proxies
         self.prompt: str = "AI-generated image - webscout"
         self.image_extension: str = "png"
-        self.logging = logging
-        if self.logging:
-            logger.info("AsyncFreeAIPlayground initialized! Ready to create some fire art! 🚀")
 
     async def generate(
         self, prompt: str, amount: int = 1, additives: bool = True,
@@ -93,9 +84,6 @@ class AsyncFreeAIImager(AsyncImageProvider):
                 + choice(punctuation)
             )
 
-        if self.logging:
-            logger.info(f"Generating {amount} images... 🎨")
-
         self.prompt = prompt
         response = []
 
@@ -115,28 +103,24 @@ class AsyncFreeAIImager(AsyncImageProvider):
                 async with session.post(self.image_gen_endpoint, json=payload) as resp:
                     resp.raise_for_status()
                     data = await resp.json()
-                    image_url = data['data'][0]['url']
-
-                    # Get the image data from the URL
-                    async with session.get(image_url) as img_resp:
-                        img_resp.raise_for_status()
-                        image_bytes = await img_resp.read()
-                        response.append(image_bytes)
+                    if 'data' in data and len(data['data']) > 0:
+                        image_url = data['data'][0]['url']
 
-                    if self.logging:
-                        logger.success(f"Generated image {len(response)}/{amount}! 🎨")
-                    break
+                        async with session.get(image_url) as img_resp:
+                            img_resp.raise_for_status()
+                            image_bytes = await img_resp.read()
+                            response.append(image_bytes)
+                        break
+                    else:
+                        print(f"Warning: No image data in response: {data}")
+                        if attempt == max_retries - 1:
+                            raise Exception("No image data received after all retries")
             except Exception as e:
+                print(f"Error generating image (attempt {attempt + 1}/{max_retries}): {str(e)}")
                 if attempt == max_retries - 1:
-                    if self.logging:
-                        logger.error(f"Failed to generate image after {max_retries} attempts: {e} 😢")
                     raise
-                if self.logging:
-                    logger.warning(f"Attempt {attempt + 1} failed, retrying in {retry_delay}s... 🔄")
                 await asyncio.sleep(retry_delay)
 
-        if self.logging:
-            logger.success("All images generated successfully! 🎉")
         return response
 
     async def save(
@@ -159,8 +143,6 @@ class AsyncFreeAIImager(AsyncImageProvider):
        """
        if not os.path.exists(dir):
            os.makedirs(dir)
-            if self.logging:
-                logger.info(f"Created directory: {dir} 📁")
 
        name = self.prompt if name is None else name
        saved_paths = []
@@ -171,9 +153,6 @@ class AsyncFreeAIImager(AsyncImageProvider):
 
             async with aiofiles.open(filepath, "wb") as f:
                 await f.write(image_bytes)
-
-            if self.logging:
-                logger.success(f"Saved image to: {filepath} 💾")
             return filename
 
         if isinstance(response, list):
@@ -181,14 +160,9 @@ class AsyncFreeAIImager(AsyncImageProvider):
         else:
             image_list = [chunk async for chunk in response]
 
-        if self.logging:
-            logger.info(f"Saving {len(image_list)} images... 💾")
-
         tasks = [save_single_image(img, i) for i, img in enumerate(image_list)]
         saved_paths = await asyncio.gather(*tasks)
 
-        if self.logging:
-            logger.success(f"All images saved successfully! Check {dir} 🎉")
         return saved_paths
 
 
@@ -199,8 +173,7 @@ if __name__ == "__main__":
            resp = await bot.generate("A shiny red sports car speeding down a scenic mountain road", 1)
            paths = await bot.save(resp)
            print(paths)
-        except Exception as e:
-            if bot.logging:
-                logger.error(f"An error occurred: {e} 😢")
+        except Exception:
+            pass
 
     asyncio.run(main())
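
With the Litlogger integration stripped out, failures now surface as printed warnings plus a final raised exception rather than logger calls. A short usage sketch, assuming the import path from the file list above and a reachable freeaichatplayground.com endpoint:

import asyncio

from webscout.Provider.TTI.FreeAIPlayground.async_freeaiplayground import AsyncFreeAIImager

async def main():
    bot = AsyncFreeAIImager(model="dall-e-3", timeout=60)
    try:
        # generate() retries each image internally, printing per-attempt
        # errors and raising only after the final attempt fails.
        images = await bot.generate("A watercolor lighthouse at dawn", 1)
        print(await bot.save(images))
    except Exception as exc:
        # No logger anymore: handle errors at the call site.
        print(f"Image generation failed: {exc}")

asyncio.run(main())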
webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py

@@ -8,10 +8,6 @@ import base64
 
 from webscout.AIbase import ImageProvider
 from webscout.litagent import LitAgent  # Import our fire user agent generator 🔥
-from webscout.Litlogger import Logger  # For that cyberpunk logging swag ⚡
-
-# Initialize our fire logger 🚀
-logger = Logger("FreeAIPlayground")
 
 class FreeAIImager(ImageProvider):
     """
@@ -48,7 +44,7 @@ class FreeAIImager(ImageProvider):
            "Accept": "application/json",
            "Accept-Language": "en-US,en;q=0.9",
            "Content-Type": "application/json",
-            "User-Agent": LitAgent().random(),  # Using our fire random agent! 🔥
+            "User-Agent": LitAgent().random(),
            "Origin": "https://freeaichatplayground.com",
            "Referer": "https://freeaichatplayground.com/",
        }
@@ -59,14 +55,11 @@ class FreeAIImager(ImageProvider):
         self.model = model
         self.prompt: str = "AI-generated image - webscout"
         self.image_extension: str = "png"
-        self.logging = logging
-        if self.logging:
-            logger.info("FreeAIPlayground initialized! Ready to create some fire art! 🚀")
 
     def generate(
         self, prompt: str, amount: int = 1, additives: bool = True,
         size: str = "1024x1024", quality: str = "standard",
-        style: str = "vivid"
+        style: str = "vivid", max_retries: int = 3, retry_delay: int = 5
     ) -> List[bytes]:
         """Generate some fire images from your prompt! 🎨
 
@@ -77,6 +70,8 @@ class FreeAIImager(ImageProvider):
            size (str): Image size (1024x1024, 1024x1792, 1792x1024)
            quality (str): Image quality (standard, hd)
            style (str): Image style (vivid, natural)
+           max_retries (int): Max retry attempts if generation fails
+           retry_delay (int): Delay between retries in seconds
 
        Returns:
            List[bytes]: Your generated images as bytes
@@ -93,9 +88,6 @@ class FreeAIImager(ImageProvider):
                 + choice(punctuation)
             )
 
-        if self.logging:
-            logger.info(f"Generating {amount} images... 🎨")
-
         self.prompt = prompt
         response = []
         for _ in range(amount):
@@ -107,27 +99,33 @@ class FreeAIImager(ImageProvider):
                 "quality": quality,
                 "style": style
             }
-            try:
-                resp = self.session.post(
-                    url=self.image_gen_endpoint,
-                    json=payload,
-                    timeout=self.timeout
-                )
-                resp.raise_for_status()
-                image_url = resp.json()['data'][0]['url']
-                # Get the image data from the URL
-                img_resp = self.session.get(image_url, timeout=self.timeout)
-                img_resp.raise_for_status()
-                response.append(img_resp.content)
-                if self.logging:
-                    logger.success(f"Generated image {len(response)}/{amount}! 🎨")
-            except Exception as e:
-                if self.logging:
-                    logger.error(f"Failed to generate image: {e} 😢")
-                raise
-
-        if self.logging:
-            logger.success("All images generated successfully! 🎉")
+
+            for attempt in range(max_retries):
+                try:
+                    resp = self.session.post(
+                        url=self.image_gen_endpoint,
+                        json=payload,
+                        timeout=self.timeout
+                    )
+                    resp.raise_for_status()
+                    response_data = resp.json()
+                    if 'data' in response_data and len(response_data['data']) > 0:
+                        image_url = response_data['data'][0]['url']
+                        # Get the image data from the URL
+                        img_resp = self.session.get(image_url, timeout=self.timeout)
+                        img_resp.raise_for_status()
+                        response.append(img_resp.content)
+                        break
+                    else:
+                        print(f"Warning: No image data in response: {response_data}")
+                        if attempt == max_retries - 1:
+                            raise Exception("No image data received after all retries")
+                except Exception as e:
+                    print(f"Error generating image (attempt {attempt + 1}/{max_retries}): {str(e)}")
+                    if attempt == max_retries - 1:
+                        raise
+                    import time
+                    time.sleep(retry_delay)
         return response
 
     def save(
@@ -153,11 +151,6 @@ class FreeAIImager(ImageProvider):
 
         if not os.path.exists(dir):
             os.makedirs(dir)
-            if self.logging:
-                logger.info(f"Created directory: {dir} 📁")
-
-        if self.logging:
-            logger.info(f"Saving {len(response)} images... 💾")
 
         filenames = []
         count = 0
@@ -174,11 +167,6 @@ class FreeAIImager(ImageProvider):
 
             with open(absolute_path_to_file, "wb") as fh:
                 fh.write(image)
-            if self.logging:
-                logger.success(f"Saved image to: {absolute_path_to_file} 💾")
-
-        if self.logging:
-            logger.success(f"All images saved successfully! Check {dir} 🎉")
         return filenames
 
 
@@ -187,6 +175,6 @@ if __name__ == "__main__":
     try:
         resp = bot.generate("A shiny red sports car speeding down a scenic mountain road", 1)
         print(bot.save(resp))
-    except Exception as e:
-        if bot.logging:
-            logger.error(f"An error occurred: {e} 😢")
+    except Exception:
+        pass
+
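
The synchronous class gains the same retry behavior, now tunable through the new max_retries and retry_delay parameters on generate(). A minimal sketch, assuming the import path from the file list above; constructor defaults are taken from the docstring shown in the diff:

from webscout.Provider.TTI.FreeAIPlayground.sync_freeaiplayground import FreeAIImager

bot = FreeAIImager()
try:
    # Each image is attempted up to max_retries times, retry_delay seconds
    # apart, before the final error is re-raised to the caller.
    images = bot.generate(
        "A shiny red sports car speeding down a scenic mountain road",
        amount=1,
        max_retries=3,
        retry_delay=5,
    )
    print(bot.save(images))
except Exception as exc:
    print(f"Image generation failed: {exc}")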