webscout-7.5-py3-none-any.whl → webscout-7.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (118)
  1. webscout/AIauto.py +5 -53
  2. webscout/AIutel.py +8 -318
  3. webscout/DWEBS.py +460 -489
  4. webscout/Extra/YTToolkit/YTdownloader.py +14 -53
  5. webscout/Extra/YTToolkit/transcriber.py +12 -13
  6. webscout/Extra/YTToolkit/ytapi/video.py +0 -1
  7. webscout/Extra/__init__.py +0 -1
  8. webscout/Extra/autocoder/autocoder_utiles.py +0 -4
  9. webscout/Extra/autocoder/rawdog.py +13 -41
  10. webscout/Extra/gguf.py +652 -428
  11. webscout/Extra/weather.py +178 -156
  12. webscout/Extra/weather_ascii.py +70 -17
  13. webscout/Litlogger/core/logger.py +1 -2
  14. webscout/Litlogger/handlers/file.py +1 -1
  15. webscout/Litlogger/styles/formats.py +0 -2
  16. webscout/Litlogger/utils/detectors.py +0 -1
  17. webscout/Provider/AISEARCH/DeepFind.py +0 -1
  18. webscout/Provider/AISEARCH/ISou.py +1 -1
  19. webscout/Provider/AISEARCH/felo_search.py +0 -1
  20. webscout/Provider/AllenAI.py +24 -9
  21. webscout/Provider/C4ai.py +29 -11
  22. webscout/Provider/ChatGPTGratis.py +24 -56
  23. webscout/Provider/DeepSeek.py +25 -17
  24. webscout/Provider/Deepinfra.py +115 -48
  25. webscout/Provider/Gemini.py +1 -1
  26. webscout/Provider/Glider.py +25 -8
  27. webscout/Provider/HF_space/qwen_qwen2.py +2 -2
  28. webscout/Provider/HeckAI.py +23 -7
  29. webscout/Provider/Jadve.py +20 -5
  30. webscout/Provider/Netwrck.py +42 -19
  31. webscout/Provider/PI.py +4 -2
  32. webscout/Provider/Perplexitylabs.py +26 -6
  33. webscout/Provider/PizzaGPT.py +10 -51
  34. webscout/Provider/TTI/AiForce/async_aiforce.py +4 -37
  35. webscout/Provider/TTI/AiForce/sync_aiforce.py +41 -38
  36. webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -9
  37. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +206 -206
  38. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +192 -192
  39. webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
  40. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
  41. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
  42. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +5 -24
  43. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +2 -22
  44. webscout/Provider/TTI/__init__.py +2 -3
  45. webscout/Provider/TTI/aiarta/async_aiarta.py +14 -14
  46. webscout/Provider/TTI/aiarta/sync_aiarta.py +52 -21
  47. webscout/Provider/TTI/fastflux/__init__.py +22 -0
  48. webscout/Provider/TTI/fastflux/async_fastflux.py +257 -0
  49. webscout/Provider/TTI/fastflux/sync_fastflux.py +247 -0
  50. webscout/Provider/TTS/__init__.py +2 -2
  51. webscout/Provider/TTS/deepgram.py +12 -39
  52. webscout/Provider/TTS/elevenlabs.py +14 -40
  53. webscout/Provider/TTS/gesserit.py +11 -35
  54. webscout/Provider/TTS/murfai.py +13 -39
  55. webscout/Provider/TTS/parler.py +17 -40
  56. webscout/Provider/TTS/speechma.py +180 -0
  57. webscout/Provider/TTS/streamElements.py +17 -44
  58. webscout/Provider/TextPollinationsAI.py +39 -59
  59. webscout/Provider/Venice.py +25 -8
  60. webscout/Provider/WiseCat.py +27 -5
  61. webscout/Provider/Youchat.py +64 -37
  62. webscout/Provider/__init__.py +0 -6
  63. webscout/Provider/akashgpt.py +20 -5
  64. webscout/Provider/flowith.py +20 -5
  65. webscout/Provider/freeaichat.py +32 -45
  66. webscout/Provider/koala.py +20 -5
  67. webscout/Provider/llamatutor.py +1 -1
  68. webscout/Provider/llmchat.py +30 -8
  69. webscout/Provider/multichat.py +65 -9
  70. webscout/Provider/talkai.py +1 -0
  71. webscout/Provider/turboseek.py +3 -0
  72. webscout/Provider/tutorai.py +2 -0
  73. webscout/Provider/typegpt.py +154 -64
  74. webscout/Provider/x0gpt.py +3 -1
  75. webscout/Provider/yep.py +102 -20
  76. webscout/__init__.py +3 -0
  77. webscout/cli.py +4 -40
  78. webscout/conversation.py +1 -10
  79. webscout/litagent/__init__.py +2 -2
  80. webscout/litagent/agent.py +351 -20
  81. webscout/litagent/constants.py +34 -5
  82. webscout/litprinter/__init__.py +0 -3
  83. webscout/models.py +181 -0
  84. webscout/optimizers.py +1 -1
  85. webscout/prompt_manager.py +2 -8
  86. webscout/scout/core/scout.py +1 -4
  87. webscout/scout/core/search_result.py +1 -1
  88. webscout/scout/core/text_utils.py +1 -1
  89. webscout/scout/core.py +2 -5
  90. webscout/scout/element.py +1 -1
  91. webscout/scout/parsers/html_parser.py +1 -1
  92. webscout/scout/utils.py +0 -1
  93. webscout/swiftcli/__init__.py +1 -3
  94. webscout/tempid.py +1 -1
  95. webscout/update_checker.py +1 -3
  96. webscout/version.py +1 -1
  97. webscout/webscout_search_async.py +1 -2
  98. webscout/yep_search.py +297 -297
  99. {webscout-7.5.dist-info → webscout-7.6.dist-info}/LICENSE.md +4 -4
  100. {webscout-7.5.dist-info → webscout-7.6.dist-info}/METADATA +101 -390
  101. {webscout-7.5.dist-info → webscout-7.6.dist-info}/RECORD +104 -110
  102. webscout/Extra/autollama.py +0 -231
  103. webscout/Provider/Amigo.py +0 -274
  104. webscout/Provider/Bing.py +0 -243
  105. webscout/Provider/DiscordRocks.py +0 -253
  106. webscout/Provider/TTI/blackbox/__init__.py +0 -4
  107. webscout/Provider/TTI/blackbox/async_blackbox.py +0 -212
  108. webscout/Provider/TTI/blackbox/sync_blackbox.py +0 -199
  109. webscout/Provider/TTI/deepinfra/__init__.py +0 -4
  110. webscout/Provider/TTI/deepinfra/async_deepinfra.py +0 -227
  111. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +0 -199
  112. webscout/Provider/TTI/imgninza/__init__.py +0 -4
  113. webscout/Provider/TTI/imgninza/async_ninza.py +0 -214
  114. webscout/Provider/TTI/imgninza/sync_ninza.py +0 -209
  115. webscout/Provider/TTS/voicepod.py +0 -117
  116. {webscout-7.5.dist-info → webscout-7.6.dist-info}/WHEEL +0 -0
  117. {webscout-7.5.dist-info → webscout-7.6.dist-info}/entry_points.txt +0 -0
  118. {webscout-7.5.dist-info → webscout-7.6.dist-info}/top_level.txt +0 -0
@@ -16,22 +16,22 @@ class Netwrck(Provider):
16
16
  """
17
17
  greeting = """Hello! I'm a helpful assistant. How can I help you today?"""
18
18
 
19
- AVAILABLE_MODELS = {
20
- "lumimaid": "neversleep/llama-3-lumimaid-8b:extended",
21
- "grok": "x-ai/grok-2",
22
- "claude": "anthropic/claude-3-7-sonnet-20250219",
23
- "euryale": "sao10k/l3-euryale-70b",
24
- "gpt4mini": "openai/gpt-4o-mini",
25
- "mythomax": "gryphe/mythomax-l2-13b",
26
- "gemini": "google/gemini-pro-1.5",
27
- "nemotron": "nvidia/llama-3.1-nemotron-70b-instruct",
28
- "deepseek-r1": "deepseek/deepseek-r1",
29
- "deepseek": "deepseek/deepseek-chat",
30
- }
19
+ AVAILABLE_MODELS = [
20
+ "neversleep/llama-3-lumimaid-8b:extended",
21
+ "x-ai/grok-2",
22
+ "anthropic/claude-3-7-sonnet-20250219",
23
+ "sao10k/l3-euryale-70b",
24
+ "openai/gpt-4o-mini",
25
+ "gryphe/mythomax-l2-13b",
26
+ "google/gemini-pro-1.5",
27
+ "nvidia/llama-3.1-nemotron-70b-instruct",
28
+ "deepseek-r1",
29
+ "deepseek",
30
+ ]
31
31
 
32
32
  def __init__(
33
33
  self,
34
- model: str = "claude",
34
+ model: str = "anthropic/claude-3-7-sonnet-20250219",
35
35
  is_conversation: bool = True,
36
36
  max_tokens: int = 4096,
37
37
  timeout: int = 30,
@@ -47,10 +47,10 @@ class Netwrck(Provider):
47
47
  ):
48
48
  """Initializes the Netwrck API client."""
49
49
  if model not in self.AVAILABLE_MODELS:
50
- raise ValueError(f"Invalid model: {model}. Choose from: {list(self.AVAILABLE_MODELS.keys())}")
50
+ raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
51
51
 
52
52
  self.model = model
53
- self.model_name = self.AVAILABLE_MODELS[model]
53
+ self.model_name = model # Use the model name directly since it's already in the correct format
54
54
  self.system_prompt = system_prompt
55
55
  self.session = requests.Session()
56
56
  self.is_conversation = is_conversation
@@ -198,7 +198,30 @@ class Netwrck(Provider):
198
198
  return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
199
199
 
200
200
  if __name__ == "__main__":
201
- from rich import print
202
-
203
- netwrck = Netwrck(model="claude")
204
- print(netwrck.chat("Hello! How are you?"))
201
+ print("-" * 80)
202
+ print(f"{'Model':<50} {'Status':<10} {'Response'}")
203
+ print("-" * 80)
204
+
205
+ # Test all available models
206
+ working = 0
207
+ total = len(Netwrck.AVAILABLE_MODELS)
208
+
209
+ for model in Netwrck.AVAILABLE_MODELS:
210
+ try:
211
+ test_ai = Netwrck(model=model, timeout=60)
212
+ response = test_ai.chat("Say 'Hello' in one word", stream=True)
213
+ response_text = ""
214
+ for chunk in response:
215
+ response_text += chunk
216
+ print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
217
+
218
+ if response_text and len(response_text.strip()) > 0:
219
+ status = "✓"
220
+ # Truncate response if too long
221
+ display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
222
+ else:
223
+ status = "✗"
224
+ display_text = "Empty or invalid response"
225
+ print(f"\r{model:<50} {status:<10} {display_text}")
226
+ except Exception as e:
227
+ print(f"\r{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/PI.py CHANGED
@@ -18,8 +18,9 @@ class PiAI(Provider):
18
18
  Attributes:
19
19
  knowledge_cutoff (str): The knowledge cutoff date for the model
20
20
  AVAILABLE_VOICES (Dict[str, int]): Available voice options for audio responses
21
+ AVAILABLE_MODELS (List[str]): Available model options for the API
21
22
  """
22
-
23
+ AVAILABLE_MODELS = ["inflection_3_pi"]
23
24
  AVAILABLE_VOICES: Dict[str, int] = {
24
25
  "voice1": 1,
25
26
  "voice2": 2,
@@ -44,7 +45,8 @@ class PiAI(Provider):
44
45
  act: str = None,
45
46
  voice: bool = False,
46
47
  voice_name: str = "voice3",
47
- output_file: str = "PiAI.mp3"
48
+ output_file: str = "PiAI.mp3",
49
+ model: str = "inflection_3_pi",
48
50
  ):
49
51
  """
50
52
  Initializes PiAI with voice support.
@@ -386,10 +386,30 @@ class PerplexityLabs(Provider):
386
386
 
387
387
 
388
388
  if __name__ == "__main__":
389
- from rich import print
389
+ print("-" * 80)
390
+ print(f"{'Model':<50} {'Status':<10} {'Response'}")
391
+ print("-" * 80)
390
392
 
391
- # Example usage
392
- ai = PerplexityLabs(timeout=60, model="r1-1776")
393
-
394
- for chunk in ai.chat("Explain the concept of neural networks", stream=True):
395
- print(chunk, end="", flush=True)
393
+ # Test all available models
394
+ working = 0
395
+ total = len(PerplexityLabs.AVAILABLE_MODELS)
396
+
397
+ for model in PerplexityLabs.AVAILABLE_MODELS:
398
+ try:
399
+ test_ai = PerplexityLabs(model=model, timeout=60)
400
+ response = test_ai.chat("Say 'Hello' in one word", stream=True)
401
+ response_text = ""
402
+ for chunk in response:
403
+ response_text += chunk
404
+ print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
405
+
406
+ if response_text and len(response_text.strip()) > 0:
407
+ status = "✓"
408
+ # Truncate response if too long
409
+ display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
410
+ else:
411
+ status = "✗"
412
+ display_text = "Empty or invalid response"
413
+ print(f"\r{model:<50} {status:<10} {display_text}")
414
+ except Exception as e:
415
+ print(f"\r{model:<50} {'✗':<10} {str(e)}")
@@ -6,14 +6,14 @@ from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
6
6
  from webscout.AIbase import Provider
7
7
  from webscout import exceptions
8
8
  from webscout import LitAgent as Lit
9
- from webscout.Litlogger import Logger, LogFormat
10
9
 
11
10
  class PIZZAGPT(Provider):
12
11
  """
13
12
  PIZZAGPT is a provider class for interacting with the PizzaGPT API.
14
13
  Supports web search integration and handles responses using regex.
15
14
  """
16
-
15
+ AVAILABLE_MODELS = ["gpt-4o-mini"]
16
+
17
17
  def __init__(
18
18
  self,
19
19
  is_conversation: bool = True,
@@ -25,10 +25,12 @@ class PIZZAGPT(Provider):
25
25
  proxies: dict = {},
26
26
  history_offset: int = 10250,
27
27
  act: str = None,
28
- logging: bool = False,
29
28
  model: str = "gpt-4o-mini"
30
29
  ) -> None:
31
30
  """Initialize PizzaGPT with enhanced configuration options."""
31
+ if model not in self.AVAILABLE_MODELS:
32
+ raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
33
+
32
34
  self.session = requests.Session()
33
35
  self.is_conversation = is_conversation
34
36
  self.max_tokens_to_sample = max_tokens
@@ -45,7 +47,9 @@ class PIZZAGPT(Provider):
45
47
  "origin": "https://www.pizzagpt.it",
46
48
  "referer": "https://www.pizzagpt.it/en",
47
49
  "user-agent": Lit().random(),
48
- "x-secret": "Marinara"
50
+ "x-secret": "Marinara",
51
+ "sec-ch-ua": '"Chromium";v="134", "Not:A-Brand";v="24"',
52
+ "sec-ch-ua-platform": '"Windows"'
49
53
  }
50
54
 
51
55
  self.__available_optimizers = (
@@ -67,28 +71,15 @@ class PIZZAGPT(Provider):
67
71
  )
68
72
  self.conversation.history_offset = history_offset
69
73
  self.session.proxies = proxies
70
-
71
- self.logger = Logger(
72
- name="PIZZAGPT",
73
- format=LogFormat.MODERN_EMOJI,
74
- ) if logging else None
75
-
76
- if self.logger:
77
- self.logger.info(f"PIZZAGPT initialized with model: {self.model}")
78
74
 
79
75
  def _extract_content(self, text: str) -> Dict[str, Any]:
80
76
  """
81
77
  Extract content from response text using regex.
82
78
  """
83
- if self.logger:
84
- self.logger.debug("Extracting content from response text")
85
-
86
79
  try:
87
80
  # Look for content pattern
88
81
  content_match = re.search(r'"content"\s*:\s*"(.*?)"(?=\s*[,}])', text, re.DOTALL)
89
82
  if not content_match:
90
- if self.logger:
91
- self.logger.error("Content pattern not found in response")
92
83
  raise exceptions.FailedToGenerateResponseError("Content not found in response")
93
84
 
94
85
  content = content_match.group(1)
@@ -108,8 +99,6 @@ class PIZZAGPT(Provider):
108
99
  }
109
100
 
110
101
  except Exception as e:
111
- if self.logger:
112
- self.logger.error(f"Failed to extract content: {str(e)}")
113
102
  raise exceptions.FailedToGenerateResponseError(f"Failed to extract content: {str(e)}")
114
103
 
115
104
  def ask(
@@ -124,21 +113,13 @@ class PIZZAGPT(Provider):
124
113
  """
125
114
  Send a prompt to PizzaGPT API with optional web search capability.
126
115
  """
127
- if self.logger:
128
- self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
129
- self.logger.debug(f"Web search enabled: {web_search}")
130
-
131
116
  conversation_prompt = self.conversation.gen_complete_prompt(prompt)
132
117
  if optimizer:
133
118
  if optimizer in self.__available_optimizers:
134
119
  conversation_prompt = getattr(Optimizers, optimizer)(
135
120
  conversation_prompt if conversationally else prompt
136
121
  )
137
- if self.logger:
138
- self.logger.debug(f"Applied optimizer: {optimizer}")
139
122
  else:
140
- if self.logger:
141
- self.logger.error(f"Invalid optimizer: {optimizer}")
142
123
  raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
143
124
 
144
125
  payload = {
@@ -147,9 +128,6 @@ class PIZZAGPT(Provider):
147
128
  "searchEnabled": web_search
148
129
  }
149
130
 
150
- if self.logger:
151
- self.logger.debug(f"Sending payload: {json.dumps(payload, indent=2)}")
152
-
153
131
  try:
154
132
  response = self.session.post(
155
133
  self.api_endpoint,
@@ -157,26 +135,17 @@ class PIZZAGPT(Provider):
157
135
  timeout=self.timeout
158
136
  )
159
137
 
160
- if self.logger:
161
- self.logger.debug(f"Response status: {response.status_code}")
162
-
163
138
  if not response.ok:
164
- if self.logger:
165
- self.logger.error(f"API request failed: {response.status_code} - {response.reason}")
166
139
  raise exceptions.FailedToGenerateResponseError(
167
140
  f"Failed to generate response - ({response.status_code}, {response.reason})"
168
141
  )
169
142
 
170
143
  response_text = response.text
171
144
  if not response_text:
172
- if self.logger:
173
- self.logger.error("Empty response received from API")
174
145
  raise exceptions.FailedToGenerateResponseError("Empty response received from API")
175
146
 
176
147
  try:
177
148
  resp = self._extract_content(response_text)
178
- if self.logger:
179
- self.logger.debug("Response parsed successfully")
180
149
 
181
150
  self.last_response.update(dict(text=resp['content']))
182
151
  self.conversation.update_chat_history(
@@ -185,14 +154,9 @@ class PIZZAGPT(Provider):
185
154
  return self.last_response
186
155
 
187
156
  except Exception as e:
188
- if self.logger:
189
- self.logger.error(f"Failed to parse response: {str(e)}")
190
- self.logger.debug(f"Raw response text: {response_text[:500]}")
191
157
  raise exceptions.FailedToGenerateResponseError(f"Failed to parse response: {str(e)}")
192
158
 
193
159
  except requests.exceptions.RequestException as e:
194
- if self.logger:
195
- self.logger.error(f"Request failed: {str(e)}")
196
160
  raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
197
161
 
198
162
  def chat(
@@ -206,9 +170,6 @@ class PIZZAGPT(Provider):
206
170
  """
207
171
  Chat with PizzaGPT with optional web search capability.
208
172
  """
209
- if self.logger:
210
- self.logger.debug(f"Chat request initiated with web_search={web_search}")
211
-
212
173
  try:
213
174
  response = self.ask(
214
175
  prompt,
@@ -218,8 +179,6 @@ class PIZZAGPT(Provider):
218
179
  )
219
180
  return self.get_message(response)
220
181
  except Exception as e:
221
- if self.logger:
222
- self.logger.error(f"Chat failed: {str(e)}")
223
182
  raise
224
183
 
225
184
  def get_message(self, response: dict) -> str:
@@ -231,9 +190,9 @@ if __name__ == "__main__":
231
190
  from rich import print
232
191
 
233
192
  # Example usage with web search enabled
234
- ai = PIZZAGPT(logging=True)
193
+ ai = PIZZAGPT()
235
194
  try:
236
- response = ai.chat("Who is Founder and CEO of HelpingAI??", web_search=True)
195
+ response = ai.chat("hi")
237
196
  print(response)
238
197
  except Exception as e:
239
198
  print(f"Error: {str(e)}")
@@ -9,15 +9,8 @@ from aiohttp import ClientError
9
9
  from pathlib import Path
10
10
 
11
11
  from webscout.AIbase import AsyncImageProvider
12
- from webscout.Litlogger import Logger, LogFormat
13
12
  from webscout.litagent import LitAgent
14
13
 
15
- # Initialize our fire logger and agent 🔥
16
- logger = Logger(
17
- "AsyncAiForce",
18
- format=LogFormat.MODERN_EMOJI,
19
-
20
- )
21
14
  agent = LitAgent()
22
15
 
23
16
  class AsyncAiForceimager(AsyncImageProvider):
@@ -46,8 +39,6 @@ class AsyncAiForceimager(AsyncImageProvider):
46
39
  AVAILABLE_MODELS = [
47
40
  "stable-diffusion-xl-lightning",
48
41
  "stable-diffusion-xl-base",
49
- "Flux-1.1-Pro",
50
- "ideogram",
51
42
  "flux",
52
43
  "flux-realism",
53
44
  "flux-anime",
@@ -58,13 +49,12 @@ class AsyncAiForceimager(AsyncImageProvider):
58
49
  "any-dark"
59
50
  ]
60
51
 
61
- def __init__(self, timeout: int = 60, proxies: dict = {}, logging: bool = True):
52
+ def __init__(self, timeout: int = 60, proxies: dict = {}):
62
53
  """Initialize your async AiForce provider with custom settings! ⚙️
63
54
 
64
55
  Args:
65
56
  timeout (int): Request timeout in seconds (default: 60)
66
57
  proxies (dict): Proxy settings for requests (default: {})
67
- logging (bool): Enable fire logging (default: True)
68
58
  """
69
59
  self.api_endpoint = "https://api.airforce/imagine2"
70
60
  self.headers = {
@@ -77,16 +67,13 @@ class AsyncAiForceimager(AsyncImageProvider):
77
67
  self.proxies = proxies
78
68
  self.prompt: str = "AI-generated image - webscout"
79
69
  self.image_extension: str = "png"
80
- self.logging = logging
81
- if self.logging:
82
- logger.info("AsyncAiForce provider initialized! 🚀")
83
70
 
84
71
  async def generate(
85
72
  self,
86
73
  prompt: str,
87
74
  amount: int = 1,
88
75
  additives: bool = True,
89
- model: str = "Flux-1.1-Pro",
76
+ model: str = "flux-3d",
90
77
  width: int = 768,
91
78
  height: int = 768,
92
79
  seed: Optional[int] = None,
@@ -104,7 +91,7 @@ class AsyncAiForceimager(AsyncImageProvider):
104
91
  ... images = await provider.generate(
105
92
  ... prompt="Epic dragon",
106
93
  ... amount=3,
107
- ... model="Flux-1.1-Pro"
94
+ ... model="flux-3d"
108
95
  ... )
109
96
 
110
97
  Args:
@@ -143,9 +130,6 @@ class AsyncAiForceimager(AsyncImageProvider):
143
130
  self.prompt = prompt
144
131
  response = []
145
132
 
146
- if self.logging:
147
- logger.info(f"Generating {amount} images with {model}... 🎨")
148
-
149
133
  async with aiohttp.ClientSession(headers=self.headers) as session:
150
134
  for i in range(amount):
151
135
  url = f"{self.api_endpoint}?model={model}&prompt={prompt}&size={width}:{height}"
@@ -157,21 +141,13 @@ class AsyncAiForceimager(AsyncImageProvider):
157
141
  async with session.get(url, timeout=self.timeout, proxy=self.proxies.get('http')) as resp:
158
142
  resp.raise_for_status()
159
143
  response.append(await resp.read())
160
- if self.logging:
161
- logger.success(f"Generated image {i + 1}/{amount}! 🎨")
162
144
  break
163
145
  except ClientError as e:
164
146
  if attempt == max_retries - 1:
165
- if self.logging:
166
- logger.error(f"Failed to generate image after {max_retries} attempts: {e} 😢")
167
147
  raise
168
148
  else:
169
- if self.logging:
170
- logger.warning(f"Attempt {attempt + 1} failed. Retrying in {retry_delay} seconds... 🔄")
171
149
  await asyncio.sleep(retry_delay)
172
150
 
173
- if self.logging:
174
- logger.success("Images generated successfully! 🎉")
175
151
  return response
176
152
 
177
153
  async def save(
@@ -209,16 +185,11 @@ class AsyncAiForceimager(AsyncImageProvider):
209
185
  save_dir = dir if dir else os.getcwd()
210
186
  if not os.path.exists(save_dir):
211
187
  os.makedirs(save_dir)
212
- if self.logging:
213
- logger.info(f"Created directory: {save_dir} 📁")
214
188
 
215
189
  name = self.prompt if name is None else name
216
190
  saved_paths = []
217
191
  timestamp = int(time.time())
218
192
 
219
- if self.logging:
220
- logger.info(f"Saving {len(response)} images... 💾")
221
-
222
193
  async def save_single_image(image_bytes: bytes, index: int) -> str:
223
194
  filename = f"{filenames_prefix}{name}_{index}.{self.image_extension}"
224
195
  filepath = os.path.join(save_dir, filename)
@@ -228,8 +199,6 @@ class AsyncAiForceimager(AsyncImageProvider):
228
199
  with open(filepath, "wb") as f:
229
200
  f.write(image_bytes)
230
201
 
231
- if self.logging:
232
- logger.success(f"Saved image to: {filepath} 💾")
233
202
  return filepath
234
203
 
235
204
  # Handle both List[bytes] and AsyncGenerator
@@ -240,8 +209,6 @@ class AsyncAiForceimager(AsyncImageProvider):
240
209
 
241
210
  tasks = [save_single_image(img, i) for i, img in enumerate(image_list)]
242
211
  saved_paths = await asyncio.gather(*tasks)
243
- if self.logging:
244
- logger.success(f"Images saved successfully! Check {dir} 🎉")
245
212
  return saved_paths
246
213
 
247
214
  if __name__ == "__main__":
@@ -252,6 +219,6 @@ if __name__ == "__main__":
252
219
  paths = await bot.save(resp)
253
220
  print(paths)
254
221
  except Exception as e:
255
- logger.error(f"An error occurred: {e} 😢")
222
+ print(f"An error occurred: {e}")
256
223
 
257
224
  asyncio.run(main())
@@ -8,15 +8,10 @@ from requests.exceptions import RequestException
8
8
  from pathlib import Path
9
9
 
10
10
  from webscout.AIbase import ImageProvider
11
- from webscout.Litlogger import Logger, LogFormat
11
+
12
12
  from webscout.litagent import LitAgent
13
13
 
14
- # Initialize our fire logger and agent 🔥
15
- logger = Logger(
16
- "AiForce",
17
- format=LogFormat.MODERN_EMOJI,
18
14
 
19
- )
20
15
  agent = LitAgent()
21
16
 
22
17
  class AiForceimager(ImageProvider):
@@ -43,8 +38,6 @@ class AiForceimager(ImageProvider):
43
38
  AVAILABLE_MODELS = [
44
39
  "stable-diffusion-xl-lightning",
45
40
  "stable-diffusion-xl-base",
46
- "Flux-1.1-Pro",
47
- "ideogram",
48
41
  "flux",
49
42
  "flux-realism",
50
43
  "flux-anime",
@@ -55,13 +48,12 @@ class AiForceimager(ImageProvider):
55
48
  "any-dark"
56
49
  ]
57
50
 
58
- def __init__(self, timeout: int = 60, proxies: dict = {}, logging: bool = True):
51
+ def __init__(self, timeout: int = 60, proxies: dict = {}):
59
52
  """Initialize your AiForce provider with custom settings! ⚙️
60
53
 
61
54
  Args:
62
55
  timeout (int): Request timeout in seconds (default: 60)
63
56
  proxies (dict): Proxy settings for requests (default: {})
64
- logging (bool): Enable fire logging (default: True)
65
57
  """
66
58
  self.api_endpoint = "https://api.airforce/imagine2"
67
59
  self.headers = {
@@ -76,16 +68,14 @@ class AiForceimager(ImageProvider):
76
68
  self.timeout = timeout
77
69
  self.prompt: str = "AI-generated image - webscout"
78
70
  self.image_extension: str = "png"
79
- self.logging = logging
80
- if self.logging:
81
- logger.info("AiForce provider initialized! 🚀")
71
+
82
72
 
83
73
  def generate(
84
74
  self,
85
75
  prompt: str,
86
76
  amount: int = 1,
87
77
  additives: bool = True,
88
- model: str = "Flux-1.1-Pro",
78
+ model: str = "flux-3d",
89
79
  width: int = 768,
90
80
  height: int = 768,
91
81
  seed: Optional[int] = None,
@@ -139,8 +129,6 @@ class AiForceimager(ImageProvider):
139
129
  )
140
130
 
141
131
  self.prompt = prompt
142
- if self.logging:
143
- logger.info(f"Generating {amount} images with {model}... 🎨")
144
132
  response = []
145
133
  for _ in range(amount):
146
134
  url = f"{self.api_endpoint}?model={model}&prompt={prompt}&size={width}:{height}"
@@ -152,21 +140,13 @@ class AiForceimager(ImageProvider):
152
140
  resp = self.session.get(url, timeout=self.timeout)
153
141
  resp.raise_for_status()
154
142
  response.append(resp.content)
155
- if self.logging:
156
- logger.success(f"Generated image {_ + 1}/{amount}! 🎨")
157
143
  break
158
144
  except RequestException as e:
159
145
  if attempt == max_retries - 1:
160
- if self.logging:
161
- logger.error(f"Failed to generate image after {max_retries} attempts: {e} 😢")
162
146
  raise
163
147
  else:
164
- if self.logging:
165
- logger.warning(f"Attempt {attempt + 1} failed. Retrying in {retry_delay} seconds... 🔄")
166
148
  time.sleep(retry_delay)
167
149
 
168
- if self.logging:
169
- logger.success("Images generated successfully! 🎉")
170
150
  return response
171
151
 
172
152
  def save(
@@ -203,12 +183,8 @@ class AiForceimager(ImageProvider):
203
183
  save_dir = dir if dir else os.getcwd()
204
184
  if not os.path.exists(save_dir):
205
185
  os.makedirs(save_dir)
206
- if self.logging:
207
- logger.info(f"Created directory: {save_dir} 📁")
208
186
 
209
187
  name = self.prompt if name is None else name
210
- if self.logging:
211
- logger.info(f"Saving {len(response)} images... 💾")
212
188
  filenames = []
213
189
  count = 0
214
190
 
@@ -225,18 +201,45 @@ class AiForceimager(ImageProvider):
225
201
 
226
202
  with open(absolute_path_to_file, "wb") as fh:
227
203
  fh.write(image)
228
- if self.logging:
229
- logger.success(f"Saved image to: {absolute_path_to_file} 💾")
230
204
 
231
- if self.logging:
232
- logger.success(f"Images saved successfully! Check {dir} 🎉")
233
205
  return filenames
234
206
 
235
207
  if __name__ == "__main__":
236
208
  bot = AiForceimager()
237
- try:
238
- resp = bot.generate("A shiny red sports car speeding down a scenic mountain road", 1)
239
- print(bot.save(resp))
240
- except Exception as e:
241
- if bot.logging:
242
- logger.error(f"An error occurred: {e} 😢")
209
+ test_prompt = "A shiny red sports car speeding down a scenic mountain road"
210
+
211
+ print(f"Testing all available models with prompt: '{test_prompt}'")
212
+ print("-" * 50)
213
+
214
+ # Create a directory for test images if it doesn't exist
215
+ test_dir = "model_test_images"
216
+ if not os.path.exists(test_dir):
217
+ os.makedirs(test_dir)
218
+
219
+ for model in bot.AVAILABLE_MODELS:
220
+ print(f"Testing model: {model}")
221
+ try:
222
+ # Generate an image with the current model
223
+ resp = bot.generate(
224
+ prompt=test_prompt,
225
+ amount=1,
226
+ model=model,
227
+ width=768,
228
+ height=768
229
+ )
230
+
231
+ # Save the image with model name as prefix
232
+ saved_paths = bot.save(
233
+ resp,
234
+ name=f"{model}_test",
235
+ dir=test_dir,
236
+ filenames_prefix=f"{model}_"
237
+ )
238
+
239
+ print(f"✓ Success! Saved image: {saved_paths[0]}")
240
+ except Exception as e:
241
+ print(f"✗ Failed with model {model}: {str(e)}")
242
+
243
+ print("-" * 30)
244
+
245
+ print("All model tests completed!")
@@ -1,9 +1,9 @@
1
- """
2
- FreeAI Provider Package
3
- Provides access to various AI models for image generation including DALL-E 3 and Flux models
4
- """
5
-
6
- from .sync_freeaiplayground import FreeAIImager
7
- from .async_freeaiplayground import AsyncFreeAIImager
8
-
9
- __all__ = ['FreeAIImager', 'AsyncFreeAIImager']
1
+ """
2
+ FreeAI Provider Package
3
+ Provides access to various AI models for image generation including DALL-E 3 and Flux models
4
+ """
5
+
6
+ from .sync_freeaiplayground import FreeAIImager
7
+ from .async_freeaiplayground import AsyncFreeAIImager
8
+
9
+ __all__ = ['FreeAIImager', 'AsyncFreeAIImager']