webscout-8.3.2-py3-none-any.whl → webscout-8.3.3-py3-none-any.whl

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of webscout has been flagged as possibly problematic.

Files changed (94)
  1. webscout/AIutel.py +146 -37
  2. webscout/Bing_search.py +1 -2
  3. webscout/Provider/AISEARCH/__init__.py +1 -0
  4. webscout/Provider/AISEARCH/stellar_search.py +132 -0
  5. webscout/Provider/ExaChat.py +84 -58
  6. webscout/Provider/HeckAI.py +85 -80
  7. webscout/Provider/Jadve.py +56 -50
  8. webscout/Provider/MiniMax.py +207 -0
  9. webscout/Provider/Nemotron.py +41 -13
  10. webscout/Provider/Netwrck.py +34 -51
  11. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1
  12. webscout/Provider/OPENAI/MiniMax.py +298 -0
  13. webscout/Provider/OPENAI/README.md +30 -29
  14. webscout/Provider/OPENAI/TogetherAI.py +4 -17
  15. webscout/Provider/OPENAI/__init__.py +3 -1
  16. webscout/Provider/OPENAI/autoproxy.py +752 -17
  17. webscout/Provider/OPENAI/base.py +7 -76
  18. webscout/Provider/OPENAI/deepinfra.py +42 -108
  19. webscout/Provider/OPENAI/flowith.py +179 -166
  20. webscout/Provider/OPENAI/friendli.py +233 -0
  21. webscout/Provider/OPENAI/monochat.py +329 -0
  22. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  23. webscout/Provider/OPENAI/toolbaz.py +1 -0
  24. webscout/Provider/OPENAI/typegpt.py +1 -1
  25. webscout/Provider/OPENAI/utils.py +19 -42
  26. webscout/Provider/OPENAI/x0gpt.py +14 -2
  27. webscout/Provider/OpenGPT.py +54 -32
  28. webscout/Provider/PI.py +58 -84
  29. webscout/Provider/StandardInput.py +32 -13
  30. webscout/Provider/TTI/README.md +9 -9
  31. webscout/Provider/TTI/__init__.py +2 -1
  32. webscout/Provider/TTI/aiarta.py +92 -78
  33. webscout/Provider/TTI/infip.py +212 -0
  34. webscout/Provider/TTI/monochat.py +220 -0
  35. webscout/Provider/TeachAnything.py +11 -3
  36. webscout/Provider/TextPollinationsAI.py +78 -70
  37. webscout/Provider/TogetherAI.py +32 -48
  38. webscout/Provider/Venice.py +37 -46
  39. webscout/Provider/VercelAI.py +27 -24
  40. webscout/Provider/WiseCat.py +35 -35
  41. webscout/Provider/WrDoChat.py +22 -26
  42. webscout/Provider/WritingMate.py +26 -22
  43. webscout/Provider/__init__.py +2 -2
  44. webscout/Provider/granite.py +48 -57
  45. webscout/Provider/koala.py +51 -39
  46. webscout/Provider/learnfastai.py +49 -64
  47. webscout/Provider/llmchat.py +79 -93
  48. webscout/Provider/llmchatco.py +63 -78
  49. webscout/Provider/multichat.py +51 -40
  50. webscout/Provider/oivscode.py +1 -1
  51. webscout/Provider/scira_chat.py +159 -96
  52. webscout/Provider/scnet.py +13 -13
  53. webscout/Provider/searchchat.py +13 -13
  54. webscout/Provider/sonus.py +12 -11
  55. webscout/Provider/toolbaz.py +25 -8
  56. webscout/Provider/turboseek.py +41 -42
  57. webscout/Provider/typefully.py +27 -12
  58. webscout/Provider/typegpt.py +41 -46
  59. webscout/Provider/uncovr.py +55 -90
  60. webscout/Provider/x0gpt.py +33 -17
  61. webscout/Provider/yep.py +79 -96
  62. webscout/auth/__init__.py +12 -1
  63. webscout/auth/providers.py +27 -5
  64. webscout/auth/routes.py +128 -104
  65. webscout/auth/server.py +367 -312
  66. webscout/client.py +121 -116
  67. webscout/litagent/Readme.md +68 -55
  68. webscout/litagent/agent.py +99 -9
  69. webscout/version.py +1 -1
  70. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/METADATA +102 -90
  71. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/RECORD +75 -87
  72. webscout/Provider/TTI/fastflux.py +0 -233
  73. webscout/Provider/Writecream.py +0 -246
  74. webscout/auth/static/favicon.svg +0 -11
  75. webscout/auth/swagger_ui.py +0 -203
  76. webscout/auth/templates/components/authentication.html +0 -237
  77. webscout/auth/templates/components/base.html +0 -103
  78. webscout/auth/templates/components/endpoints.html +0 -750
  79. webscout/auth/templates/components/examples.html +0 -491
  80. webscout/auth/templates/components/footer.html +0 -75
  81. webscout/auth/templates/components/header.html +0 -27
  82. webscout/auth/templates/components/models.html +0 -286
  83. webscout/auth/templates/components/navigation.html +0 -70
  84. webscout/auth/templates/static/api.js +0 -455
  85. webscout/auth/templates/static/icons.js +0 -168
  86. webscout/auth/templates/static/main.js +0 -784
  87. webscout/auth/templates/static/particles.js +0 -201
  88. webscout/auth/templates/static/styles.css +0 -3353
  89. webscout/auth/templates/static/ui.js +0 -374
  90. webscout/auth/templates/swagger_ui.html +0 -170
  91. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/WHEEL +0 -0
  92. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/entry_points.txt +0 -0
  93. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/licenses/LICENSE.md +0 -0
  94. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/top_level.txt +0 -0
webscout/Provider/OpenGPT.py CHANGED

@@ -87,7 +87,7 @@ class OpenGPT(Provider):
     def ask(
         self,
         prompt: str,
-        stream: bool = False, # Note: API does not support streaming
+        stream: bool = False, # Note: API does not support streaming natively
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
@@ -121,38 +121,54 @@ class OpenGPT(Provider):
             "id": self.app_id,
             "userKey": "" # Assuming userKey is meant to be empty as in the original code
         }
-
-        # API does not stream, implement non-stream logic directly
+
+        def for_stream():
+            try:
+                response = self.session.post(
+                    "https://open-gpt.app/api/generate",
+                    data=json.dumps(payload),
+                    timeout=self.timeout,
+                    impersonate="chrome110"
+                )
+                response.raise_for_status()
+                response_text = response.text
+                buffer = ""
+                chunk_size = 32
+                for i in range(0, len(response_text), chunk_size):
+                    out = response_text[i:i+chunk_size]
+                    if out.strip():
+                        if raw:
+                            yield out
+                        else:
+                            yield {"text": out}
+                self.last_response = {"text": response_text}
+                self.conversation.update_chat_history(prompt, response_text)
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except Exception as e:
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}") from e
+
         def for_non_stream():
             try:
-                # Use curl_cffi session post with impersonate
                 response = self.session.post(
                     "https://open-gpt.app/api/generate",
-                    # headers are set on the session
-                    data=json.dumps(payload), # Keep data as JSON string
+                    data=json.dumps(payload),
                     timeout=self.timeout,
-                    # proxies are set on the session
-                    impersonate="chrome110" # Use a common impersonation profile
+                    impersonate="chrome110"
                 )
-
-                response.raise_for_status() # Check for HTTP errors
-
-                # Use response.text which is already decoded
+                response.raise_for_status()
                 response_text = response.text
                 self.last_response = {"text": response_text}
                 self.conversation.update_chat_history(prompt, response_text)
-
-                # Return dict or raw string based on raw flag
                 return {"raw": response_text} if raw else {"text": response_text}
-
-            except CurlError as e: # Catch CurlError
+            except CurlError as e:
                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
-            except Exception as e: # Catch other potential exceptions (like HTTPError, JSONDecodeError)
+            except Exception as e:
                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                 raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}") from e
-
-        # This provider doesn't support streaming, so just return non-stream
-        return for_non_stream()
+
+        return for_stream() if stream else for_non_stream()
 
     def chat(
         self,
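Note that the new `for_stream` path does not actually stream from the network: it fetches the complete response, then slices it into fixed-size chunks. A minimal standalone sketch of that pseudo-streaming pattern (the names below are illustrative, not part of the package):

```python
from typing import Generator, Union

def pseudo_stream(full_text: str, chunk_size: int = 32,
                  raw: bool = False) -> Generator[Union[str, dict], None, None]:
    """Slice an already-complete response into fixed-size chunks,
    mirroring the for_stream() added to OpenGPT.ask() above."""
    for i in range(0, len(full_text), chunk_size):
        out = full_text[i:i + chunk_size]
        if out.strip():  # whitespace-only slices are skipped entirely
            yield out if raw else {"text": out}

for part in pseudo_stream("The upstream API returns the whole reply at once."):
    print(part["text"], end="", flush=True)
print()
```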
@@ -173,21 +189,25 @@ class OpenGPT(Provider):
         Returns:
             A string with the response text.
         """
-        # Since ask() now handles both stream=True/False by returning the full response dict:
-        response_data = self.ask(
-            prompt,
-            stream=False, # Call ask in non-stream mode internally
-            raw=False, # Ensure ask returns dict with 'text' key
-            optimizer=optimizer,
-            conversationally=conversationally
-        )
-        # If stream=True was requested, simulate streaming by yielding the full message at once
         if stream:
             def stream_wrapper():
-                yield self.get_message(response_data) # yield only the text string
+                for part in self.ask(
+                    prompt,
+                    stream=True,
+                    raw=False,
+                    optimizer=optimizer,
+                    conversationally=conversationally
+                ):
+                    yield self.get_message(part) if isinstance(part, dict) else part
             return stream_wrapper()
         else:
-            # If stream=False, return the full message directly
+            response_data = self.ask(
+                prompt,
+                stream=False,
+                raw=False,
+                optimizer=optimizer,
+                conversationally=conversationally
+            )
             return self.get_message(response_data)
 
     def get_message(self, response: dict) -> str:
@@ -206,4 +226,6 @@ class OpenGPT(Provider):
 
 if __name__ == "__main__":
     ai = OpenGPT()
-    print(ai.chat("Hello, how are you?"))
+    response = ai.chat("write me about humans in points", stream=True)
+    for part in response:
+        print(part, end="", flush=True)
webscout/Provider/PI.py CHANGED
@@ -172,9 +172,10 @@ class PiAI(Provider):
         voice: bool = None,
         voice_name: str = None,
         output_file: str = None
-    ) -> dict:
+    ) -> Union[dict, str, Any]:
         """
         Interact with Pi.ai by sending a prompt and receiving a response.
+        Now supports raw streaming and non-streaming output, matching the pattern in other providers.
 
         Args:
             prompt (str): The prompt to send
@@ -209,103 +210,82 @@ class PiAI(Provider):
         }
 
         def process_stream():
-            try: # Add outer try block for error handling
-                # Try primary URL first
+            try:
                 current_url = self.url
                 response = self.session.post(
                     current_url,
-                    # headers are set on the session
-                    # cookies are handled by the session
                     json=data,
                     stream=True,
                     timeout=self.timeout,
-                    # proxies are set on the session
-                    impersonate="chrome110" # Use a common impersonation profile
+                    impersonate="chrome110"
                 )
-
-                # If primary URL fails, try fallback URL
                 if not response.ok and current_url == self.primary_url:
                     current_url = self.fallback_url
                     response = self.session.post(
                         current_url,
-                        # headers are set on the session
-                        # cookies are handled by the session
                         json=data,
                         stream=True,
                         timeout=self.timeout,
-                        # proxies are set on the session
-                        impersonate="chrome110" # Use a common impersonation profile
+                        impersonate="chrome110"
                     )
-
-                response.raise_for_status() # Check for HTTP errors after potential fallback
-
-                # --- Process response content ---
-                # Note: curl_cffi's response.content might behave differently for streams.
-                # It's often better to iterate directly.
-                # output_str = response.content.decode('utf-8') # Avoid reading full content at once for streams
+                response.raise_for_status()
 
                 sids = []
                 streaming_text = ""
-                full_raw_data_for_sids = "" # Accumulate raw data to find SIDs later
-
-                # Iterate over bytes and decode manually
+                full_raw_data_for_sids = ""
+
+                processed_stream = sanitize_stream(
+                    data=response.iter_lines(),
+                    intro_value="data: ",
+                    to_json=True,
+                    content_extractor=self._pi_extractor,
+                    raw=raw
+                )
+                for content in processed_stream:
+                    if raw:
+                        yield content
+                    else:
+                        if content and isinstance(content, str):
+                            streaming_text += content
+                            yield {"text": streaming_text}
+                # SID extraction for voice
                 for line_bytes in response.iter_lines():
                     if line_bytes:
                         line = line_bytes.decode('utf-8')
-                        full_raw_data_for_sids += line + "\n" # Accumulate for SID extraction
-
-                        if line.startswith("data: "):
-                            json_line_str = line[6:] # Get the JSON part as string
-                            try:
-                                # Process this single JSON line string with sanitize_stream
-                                processed_gen = sanitize_stream(
-                                    data=json_line_str,
-                                    to_json=True,
-                                    content_extractor=self._pi_extractor
-                                )
-                                chunk_text = next(processed_gen, None) # Get the single extracted text item
-                                if chunk_text and isinstance(chunk_text, str):
-                                    streaming_text += chunk_text
-                                    yield {"text": streaming_text} # Always yield dict with aggregated text
-                            except (StopIteration, json.JSONDecodeError, UnicodeDecodeError):
-                                continue # Skip if sanitize_stream fails or yields nothing
-                # Extract SIDs after processing the stream
+                        full_raw_data_for_sids += line + "\n"
                 sids = re.findall(r'"sid":"(.*?)"', full_raw_data_for_sids)
                 second_sid = sids[1] if len(sids) >= 2 else None
-
                 if voice and voice_name and second_sid:
                     threading.Thread(
                         target=self.download_audio_threaded,
                         args=(voice_name, second_sid, output_file)
                     ).start()
-
-                # Update history and last response after stream finishes
-                self.last_response = dict(text=streaming_text)
-                self.conversation.update_chat_history(
-                    prompt, streaming_text
-                )
-
-            except CurlError as e: # Catch CurlError
+                if not raw:
+                    self.last_response = dict(text=streaming_text)
+                    self.conversation.update_chat_history(prompt, streaming_text)
+            except CurlError as e:
                 raise exceptions.FailedToGenerateResponseError(f"API request failed (CurlError): {e}") from e
-            except Exception as e: # Catch other potential exceptions (like HTTPError)
+            except Exception as e:
                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                 raise exceptions.FailedToGenerateResponseError(f"API request failed ({type(e).__name__}): {e} - {err_text}") from e
 
-
         if stream:
             return process_stream()
         else:
-            # For non-stream, collect all responses and return the final one
-            final_text = ""
-            # process_stream always yields dicts now
-            for res in process_stream():
-                if isinstance(res, dict) and "text" in res:
-                    final_text = res["text"] # Keep updating with the latest aggregated text
-
-            # last_response and history are updated within process_stream
-            # Return the final aggregated response dict or raw text
-            return final_text if raw else self.last_response
-
+            full_response = ""
+            for chunk in process_stream():
+                if raw:
+                    if isinstance(chunk, str):
+                        full_response += chunk
+                else:
+                    if isinstance(chunk, dict) and "text" in chunk:
+                        full_response = chunk["text"]
+            if not raw:
+                self.last_response = {"text": full_response}
+                self.conversation.update_chat_history(prompt, full_response)
+                return self.last_response
+            else:
+                return full_response
 
     def chat(
         self,
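The hand-rolled `data: ` line parsing is replaced by a single `sanitize_stream` call over `response.iter_lines()`. Roughly, with these arguments the helper strips the SSE prefix, JSON-decodes each frame, and runs the content extractor. A simplified approximation of that behavior (the real helper lives in `webscout.AIutel`; this sketch is not its actual implementation):

```python
import json
from typing import Any, Callable, Generator, Iterable, Optional

def sse_extract(
    lines: Iterable[bytes],
    intro_value: str = "data: ",
    content_extractor: Optional[Callable[[Any], Optional[str]]] = None,
) -> Generator[str, None, None]:
    """Approximate sanitize_stream(..., intro_value='data: ', to_json=True,
    content_extractor=...): strip the SSE prefix, parse the JSON frame,
    and yield whatever the extractor pulls out of it."""
    for raw_line in lines:
        line = raw_line.decode("utf-8", errors="ignore").strip()
        if not line.startswith(intro_value):
            continue
        try:
            obj = json.loads(line[len(intro_value):])
        except json.JSONDecodeError:
            continue  # skip keep-alives and malformed frames
        if content_extractor is not None:
            text = content_extractor(obj)
        else:
            text = obj.get("text") if isinstance(obj, dict) else None
        if text:
            yield text
```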
@@ -315,8 +295,9 @@ class PiAI(Provider):
         conversationally: bool = False,
         voice: bool = None,
         voice_name: str = None,
-        output_file: str = None
-    ) -> str:
+        output_file: str = None,
+        raw: bool = False, # Added raw parameter
+    ) -> Union[str, Any]:
         """
         Generates a response based on the provided prompt.
 
@@ -339,35 +320,37 @@ class PiAI(Provider):
 
         if stream:
             def stream_generator():
-                # ask() yields dicts or raw JSON objects when streaming
                 gen = self.ask(
                     prompt,
                     stream=True,
-                    raw=False, # Ensure ask yields dicts for get_message
+                    raw=raw,
                     optimizer=optimizer,
                     conversationally=conversationally,
                     voice=voice,
                     voice_name=voice_name,
                     output_file=output_file
                 )
-                for response_dict in gen:
-                    # get_message expects dict
-                    yield self.get_message(response_dict)
+                for response in gen:
+                    if raw:
+                        yield response
+                    else:
+                        yield self.get_message(response)
             return stream_generator()
         else:
-            # ask() returns dict or raw text when not streaming
             response_data = self.ask(
                 prompt,
                 stream=False,
-                raw=False, # Ensure ask returns dict for get_message
+                raw=raw,
                 optimizer=optimizer,
                 conversationally=conversationally,
                 voice=voice,
                 voice_name=voice_name,
                 output_file=output_file
             )
-            # get_message expects dict
-            return self.get_message(response_data)
+            if raw:
+                return response_data
+            else:
+                return self.get_message(response_data)
 
     def get_message(self, response: dict) -> str:
         """Retrieves message only from response"""
@@ -411,19 +394,10 @@ if __name__ == '__main__':
     try: # Add try-except block for testing
         ai = PiAI(timeout=60)
         print("[bold blue]Testing Chat (Stream):[/bold blue]")
-        response = ai.chat(input(">>> "), stream=True)
+        response = ai.chat("hi", stream=True, raw=False)
         full_response = ""
         for chunk in response:
             print(chunk, end="", flush=True)
-            full_response += chunk
-        print("\n[bold green]Stream Test Complete.[/bold green]")
-
-        # Optional: Test non-stream
-        # print("\n[bold blue]Testing Chat (Non-Stream):[/bold blue]")
-        # response_non_stream = ai.chat("Hello again", stream=False)
-        # print(response_non_stream)
-        # print("[bold green]Non-Stream Test Complete.[/bold green]")
-
     except exceptions.FailedToGenerateResponseError as e:
         print(f"\n[bold red]API Error:[/bold red] {e}")
     except Exception as e:
webscout/Provider/StandardInput.py CHANGED

@@ -1,7 +1,7 @@
 from curl_cffi.requests import Session
 import uuid
 import re
-from typing import Any, Dict, Optional, Union
+from typing import Any, Dict, Generator, Optional, Union
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
@@ -166,7 +166,8 @@ class StandardInputAI(Provider):
         prompt: str,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> Dict[str, Any]:
+        raw: bool = False, # Added raw parameter
+    ) -> Union[Dict[str, Any], Generator[str, None, None]]:
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
@@ -233,9 +234,12 @@ class StandardInputAI(Provider):
             for content_chunk in processed_stream:
                 if content_chunk and isinstance(content_chunk, str):
                     full_response += content_chunk
-
+                    if raw:
+                        yield content_chunk
             self.last_response = {"text": full_response}
             self.conversation.update_chat_history(prompt, full_response)
+            if raw:
+                return full_response
             return {"text": full_response}
         except Exception as e:
             raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
@@ -245,17 +249,32 @@ class StandardInputAI(Provider):
         prompt: str,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str:
-        return self.get_message(
-            self.ask(
-                prompt, optimizer=optimizer, conversationally=conversationally
+        raw: bool = False, # Added raw parameter
+    ) -> Union[str, Generator[str, None, None]]:
+        def for_stream():
+            gen = self.ask(
+                prompt, optimizer=optimizer, conversationally=conversationally, raw=raw
             )
-        )
-
-    def get_message(self, response: dict) -> str:
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        # Extractor handles formatting
-        return response.get("text", "").replace('\\n', '\n').replace('\\n\\n', '\n\n')
+            if hasattr(gen, '__iter__') and not isinstance(gen, dict):
+                for chunk in gen:
+                    if raw:
+                        yield chunk
+                    else:
+                        yield self.get_message({"text": chunk})
+            else:
+                if raw:
+                    yield gen if isinstance(gen, str) else self.get_message(gen)
+                else:
+                    yield self.get_message(gen)
+        def for_non_stream():
+            result = self.ask(
+                prompt, optimizer=optimizer, conversationally=conversationally, raw=raw
+            )
+            if raw:
+                return result if isinstance(result, str) else self.get_message(result)
+            else:
+                return self.get_message(result)
+        return for_stream() if raw else for_non_stream()
 
 if __name__ == "__main__":
     print("-" * 100)
webscout/Provider/TTI/README.md CHANGED

@@ -14,14 +14,14 @@ These providers allow you to easily generate AI‑created art from text prompts
 
 ## 📦 Supported Providers
 
-| Provider        | Available Models (examples)               |
-|-----------------|-------------------------------------------|
-| `AIArta`        | `flux`, `medieval`, `dreamshaper_xl`, ... |
-| `FastFluxAI`    | `flux_1_schnell`                          |
-| `MagicStudioAI` | `magicstudio`                             |
-| `PixelMuse`     | `flux-schnell`, `imagen-3`, `recraft-v3`  |
-| `PiclumenAI`    | `piclumen-v1`                             |
-| `PollinationsAI`| `flux`, `turbo`, `gptimage`               |
+| Provider         | Available Models (examples)               |
+| ---------------- | ----------------------------------------- |
+| `AIArta`         | `flux`, `medieval`, `dreamshaper_xl`, ... |
+| `InfipAI`        | `img3`, `img4`, `uncen`                   |
+| `MagicStudioAI`  | `magicstudio`                             |
+| `PixelMuse`      | `flux-schnell`, `imagen-3`, `recraft-v3`  |
+| `PiclumenAI`     | `piclumen-v1`                             |
+| `PollinationsAI` | `flux`, `turbo`, `gptimage`               |
 
 > **Note**: Some providers require the `Pillow` package for image processing.
 
@@ -71,7 +71,7 @@ response = client.images.create(
 ## 🔧 Provider Specifics
 
 - **AIArta** – Uses Firebase authentication tokens and supports many tattoo‑style models.
-- **FastFluxAI** – Simple API for quick image generation.
+- **InfipAI** – Offers various models for different image styles.
 - **MagicStudioAI** – Generates images through MagicStudio's public endpoint.
 - **PixelMuse** – Supports several models and converts images from WebP.
 - **PiclumenAI** – Returns JPEG images directly from the API.
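Based on the `client.images.create(...)` shape shown earlier in this README, usage of the new `InfipAI` provider would presumably look like the following sketch; the constructor and parameter names here are assumptions, not confirmed API:

```python
from webscout.Provider.TTI import InfipAI  # provider added in 8.3.3

# Hypothetical sketch following the images.create(...) shape shown above;
# actual constructor arguments and parameter names may differ.
client = InfipAI()
response = client.images.create(
    model="img3",  # one of the models listed in the table: img3, img4, uncen
    prompt="a lighthouse at dusk, oil painting",
    n=1,
)
print(response)
```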
webscout/Provider/TTI/__init__.py CHANGED

@@ -1,10 +1,11 @@
 from .pollinations import *
 from .piclumen import *
 from .magicstudio import *
-from .fastflux import *
 from .pixelmuse import *
 from .aiarta import *
 from .gpt1image import *
 from .imagen import *
 from .together import *
 from .bing import *
+from .infip import *
+from .monochat import *