webscout 8.2.8__py3-none-any.whl → 8.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (184)
  1. webscout/AIauto.py +32 -14
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +153 -35
  22. webscout/Provider/Deepinfra.py +339 -339
  23. webscout/Provider/ExaChat.py +358 -358
  24. webscout/Provider/Gemini.py +169 -169
  25. webscout/Provider/GithubChat.py +1 -2
  26. webscout/Provider/Glider.py +3 -3
  27. webscout/Provider/HeckAI.py +171 -81
  28. webscout/Provider/OPENAI/BLACKBOXAI.py +766 -735
  29. webscout/Provider/OPENAI/Cloudflare.py +7 -7
  30. webscout/Provider/OPENAI/FreeGemini.py +6 -5
  31. webscout/Provider/OPENAI/NEMOTRON.py +8 -20
  32. webscout/Provider/OPENAI/Qwen3.py +283 -0
  33. webscout/Provider/OPENAI/README.md +952 -1253
  34. webscout/Provider/OPENAI/TwoAI.py +357 -0
  35. webscout/Provider/OPENAI/__init__.py +5 -1
  36. webscout/Provider/OPENAI/ai4chat.py +40 -40
  37. webscout/Provider/OPENAI/api.py +808 -649
  38. webscout/Provider/OPENAI/c4ai.py +3 -3
  39. webscout/Provider/OPENAI/chatgpt.py +555 -555
  40. webscout/Provider/OPENAI/chatgptclone.py +493 -487
  41. webscout/Provider/OPENAI/chatsandbox.py +4 -3
  42. webscout/Provider/OPENAI/copilot.py +242 -0
  43. webscout/Provider/OPENAI/deepinfra.py +5 -2
  44. webscout/Provider/OPENAI/e2b.py +63 -5
  45. webscout/Provider/OPENAI/exaai.py +416 -410
  46. webscout/Provider/OPENAI/exachat.py +444 -443
  47. webscout/Provider/OPENAI/freeaichat.py +2 -2
  48. webscout/Provider/OPENAI/glider.py +5 -2
  49. webscout/Provider/OPENAI/groq.py +5 -2
  50. webscout/Provider/OPENAI/heckai.py +308 -307
  51. webscout/Provider/OPENAI/mcpcore.py +8 -2
  52. webscout/Provider/OPENAI/multichat.py +4 -4
  53. webscout/Provider/OPENAI/netwrck.py +6 -5
  54. webscout/Provider/OPENAI/oivscode.py +287 -0
  55. webscout/Provider/OPENAI/opkfc.py +496 -496
  56. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  57. webscout/Provider/OPENAI/scirachat.py +15 -9
  58. webscout/Provider/OPENAI/sonus.py +304 -303
  59. webscout/Provider/OPENAI/standardinput.py +433 -433
  60. webscout/Provider/OPENAI/textpollinations.py +4 -4
  61. webscout/Provider/OPENAI/toolbaz.py +413 -413
  62. webscout/Provider/OPENAI/typefully.py +3 -3
  63. webscout/Provider/OPENAI/typegpt.py +11 -5
  64. webscout/Provider/OPENAI/uncovrAI.py +463 -462
  65. webscout/Provider/OPENAI/utils.py +90 -79
  66. webscout/Provider/OPENAI/venice.py +431 -425
  67. webscout/Provider/OPENAI/wisecat.py +387 -381
  68. webscout/Provider/OPENAI/writecream.py +3 -3
  69. webscout/Provider/OPENAI/x0gpt.py +365 -378
  70. webscout/Provider/OPENAI/yep.py +39 -13
  71. webscout/Provider/TTI/README.md +55 -101
  72. webscout/Provider/TTI/__init__.py +4 -9
  73. webscout/Provider/TTI/aiarta.py +365 -0
  74. webscout/Provider/TTI/artbit.py +0 -0
  75. webscout/Provider/TTI/base.py +64 -0
  76. webscout/Provider/TTI/fastflux.py +200 -0
  77. webscout/Provider/TTI/magicstudio.py +201 -0
  78. webscout/Provider/TTI/piclumen.py +203 -0
  79. webscout/Provider/TTI/pixelmuse.py +225 -0
  80. webscout/Provider/TTI/pollinations.py +221 -0
  81. webscout/Provider/TTI/utils.py +11 -0
  82. webscout/Provider/TTS/__init__.py +2 -1
  83. webscout/Provider/TTS/base.py +159 -159
  84. webscout/Provider/TTS/openai_fm.py +129 -0
  85. webscout/Provider/TextPollinationsAI.py +308 -308
  86. webscout/Provider/TwoAI.py +239 -44
  87. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  88. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  89. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  90. webscout/Provider/Writecream.py +246 -246
  91. webscout/Provider/__init__.py +2 -0
  92. webscout/Provider/ai4chat.py +33 -8
  93. webscout/Provider/koala.py +169 -169
  94. webscout/Provider/oivscode.py +309 -0
  95. webscout/Provider/samurai.py +3 -2
  96. webscout/Provider/typegpt.py +3 -3
  97. webscout/Provider/uncovr.py +368 -368
  98. webscout/client.py +70 -0
  99. webscout/litprinter/__init__.py +58 -58
  100. webscout/optimizers.py +419 -419
  101. webscout/scout/README.md +3 -1
  102. webscout/scout/core/crawler.py +134 -64
  103. webscout/scout/core/scout.py +148 -109
  104. webscout/scout/element.py +106 -88
  105. webscout/swiftcli/Readme.md +323 -323
  106. webscout/swiftcli/plugins/manager.py +9 -2
  107. webscout/version.py +1 -1
  108. webscout/zeroart/__init__.py +134 -134
  109. webscout/zeroart/effects.py +100 -100
  110. webscout/zeroart/fonts.py +1238 -1238
  111. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/METADATA +159 -35
  112. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/RECORD +116 -161
  113. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
  114. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
  115. webscout/Litlogger/Readme.md +0 -175
  116. webscout/Litlogger/core/__init__.py +0 -6
  117. webscout/Litlogger/core/level.py +0 -23
  118. webscout/Litlogger/core/logger.py +0 -165
  119. webscout/Litlogger/handlers/__init__.py +0 -12
  120. webscout/Litlogger/handlers/console.py +0 -33
  121. webscout/Litlogger/handlers/file.py +0 -143
  122. webscout/Litlogger/handlers/network.py +0 -173
  123. webscout/Litlogger/styles/__init__.py +0 -7
  124. webscout/Litlogger/styles/colors.py +0 -249
  125. webscout/Litlogger/styles/formats.py +0 -458
  126. webscout/Litlogger/styles/text.py +0 -87
  127. webscout/Litlogger/utils/__init__.py +0 -6
  128. webscout/Litlogger/utils/detectors.py +0 -153
  129. webscout/Litlogger/utils/formatters.py +0 -200
  130. webscout/Provider/TTI/AiForce/README.md +0 -159
  131. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  132. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  133. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  134. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  135. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  136. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  137. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  138. webscout/Provider/TTI/ImgSys/README.md +0 -174
  139. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  140. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  141. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  142. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  143. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  144. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  145. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  146. webscout/Provider/TTI/Nexra/README.md +0 -155
  147. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  148. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  149. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  150. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  151. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  152. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  153. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  154. webscout/Provider/TTI/aiarta/README.md +0 -134
  155. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  156. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  157. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  158. webscout/Provider/TTI/artbit/README.md +0 -100
  159. webscout/Provider/TTI/artbit/__init__.py +0 -22
  160. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  161. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  162. webscout/Provider/TTI/fastflux/README.md +0 -129
  163. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  164. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  165. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  166. webscout/Provider/TTI/huggingface/README.md +0 -114
  167. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  168. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  169. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  170. webscout/Provider/TTI/piclumen/README.md +0 -161
  171. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  172. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  173. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  174. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  175. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  176. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  177. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  178. webscout/Provider/TTI/talkai/README.md +0 -139
  179. webscout/Provider/TTI/talkai/__init__.py +0 -4
  180. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  181. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  182. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  183. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
  184. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/yep.py

@@ -8,7 +8,7 @@ from typing import List, Dict, Optional, Union, Generator, Any
 from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
 from .utils import (
     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-    ChatCompletionMessage, CompletionUsage, get_system_prompt  # Import get_system_prompt
+    ChatCompletionMessage, CompletionUsage, get_system_prompt, count_tokens  # Import count_tokens
 )

 # Attempt to import LitAgent, fallback if not available
@@ -56,13 +56,10 @@ class Completions(BaseCompletions):
         Mimics openai.chat.completions.create
         Note: YEPCHAT does not support system messages. They will be ignored.
         """
-        # Accept both raw and prefixed model names from the user, but always send the raw name to the API
-        if model.startswith("YEPCHAT/"):
-            model_raw = model.replace("YEPCHAT/", "", 1)
-        else:
-            model_raw = model
+        # Only accept and use the raw model name (no prefix logic)
+        model_raw = model
         # Validate model
-        if f"YEPCHAT/{model_raw}" not in self._client.AVAILABLE_MODELS:
+        if model_raw not in self._client.AVAILABLE_MODELS:
             raise ValueError(
                 f"Invalid model: {model}. Choose from: {self._client.AVAILABLE_MODELS}"
             )
@@ -124,6 +121,11 @@ class Completions(BaseCompletions):
                     f"YEPCHAT API Error: {response.status_code} {response.reason} - {response.text}"
                 )

+            # Track tokens for streaming
+            prompt_tokens = count_tokens([m.get('content', '') for m in payload.get('messages', [])])
+            completion_tokens = 0
+            total_tokens = prompt_tokens
+
             for line in response.iter_lines(decode_unicode=True):
                 if line:
                     line = line.strip()
@@ -139,6 +141,11 @@ class Completions(BaseCompletions):
                             content = delta_data.get('content')
                             role = delta_data.get('role', None)

+                            # Count tokens for this chunk
+                            chunk_tokens = count_tokens(content) if content else 0
+                            completion_tokens += chunk_tokens
+                            total_tokens = prompt_tokens + completion_tokens
+
                             if content is not None or role is not None:
                                 delta = ChoiceDelta(content=content, role=role)
                                 choice = Choice(index=0, delta=delta, finish_reason=finish_reason)
@@ -148,6 +155,14 @@ class Completions(BaseCompletions):
                                     created=created_time,
                                     model=model,
                                 )
+                                # Set usage directly on the chunk object
+                                chunk.usage = {
+                                    "prompt_tokens": prompt_tokens,
+                                    "completion_tokens": completion_tokens,
+                                    "total_tokens": total_tokens,
+                                    "estimated_cost": None
+                                }
+                                # Yield the chunk with usage information
                                 yield chunk

                         except json.JSONDecodeError:
@@ -163,6 +178,13 @@ class Completions(BaseCompletions):
                 created=created_time,
                 model=model,
             )
+            # Set usage directly on the chunk object
+            chunk.usage = {
+                "prompt_tokens": prompt_tokens,
+                "completion_tokens": completion_tokens,
+                "total_tokens": total_tokens,
+                "estimated_cost": None
+            }
             yield chunk

         except cloudscraper.exceptions.CloudflareChallengeError as e:
@@ -209,7 +231,14 @@ class Completions(BaseCompletions):
                 message=message,
                 finish_reason=finish_reason
             )
-            usage = CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0)
+            # Use count_tokens to compute usage
+            prompt_tokens = count_tokens([m.get('content', '') for m in payload.get('messages', [])])
+            completion_tokens = count_tokens(full_response_content)
+            usage = CompletionUsage(
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                total_tokens=prompt_tokens + completion_tokens
+            )
             completion = ChatCompletion(
                 id=request_id,
                 choices=[choice],
@@ -237,11 +266,8 @@ class YEPCHAT(OpenAICompatibleProvider):
     """
     _base_models = ["DeepSeek-R1-Distill-Qwen-32B", "Mixtral-8x7B-Instruct-v0.1"]

-    # Create AVAILABLE_MODELS as a list with the format "YEPCHAT/model"
-    AVAILABLE_MODELS = [f"YEPCHAT/{model}" for model in _base_models]
-
-    # Create a mapping dictionary for internal use
-    _model_mapping = {model: f"YEPCHAT/{model}" for model in _base_models}
+    # Create AVAILABLE_MODELS as a list of base model names (no prefix)
+    AVAILABLE_MODELS = _base_models

     def __init__(
         self,
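The yep.py hunks above replace the zeroed-out `CompletionUsage` with estimates derived from `count_tokens` (imported from the provider's `utils` module). A minimal, self-contained sketch of that accounting pattern is shown below; the `count_tokens` body here is only a stand-in for webscout's real helper, and `build_usage` is a hypothetical name used purely for illustration.

```python
def count_tokens(text) -> int:
    # Stand-in estimator: the real helper in webscout's utils may count differently.
    if isinstance(text, (list, tuple)):
        return sum(count_tokens(t) for t in text)
    return len(str(text).split())

def build_usage(messages, completion_text):
    """Mirror the diff's pattern: prompt tokens from the request messages,
    completion tokens from the generated text."""
    prompt_tokens = count_tokens([m.get("content", "") for m in messages])
    completion_tokens = count_tokens(completion_text)
    return {
        "prompt_tokens": prompt_tokens,
        "completion_tokens": completion_tokens,
        "total_tokens": prompt_tokens + completion_tokens,
        "estimated_cost": None,
    }

if __name__ == "__main__":
    msgs = [{"role": "user", "content": "Hello there"}]
    print(build_usage(msgs, "General Kenobi!"))
```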
webscout/Provider/TTI/README.md

@@ -1,128 +1,82 @@
-# 🎨 WebScout Text-to-Image (TTI) Providers
+# 🖼️ Webscout Text-to-Image (TTI) Providers

-Welcome to WebScout's Text-to-Image providers - your ultimate collection of AI art generators! 🚀
+Webscout includes a collection of Text-to-Image providers that follow a common interface inspired by the OpenAI Python client. Each provider exposes an `images.create()` method which returns an `ImageResponse` object containing either image URLs or base64 data.

-## 🌟 Available Providers
+These providers allow you to easily generate AI‑created art from text prompts while handling image conversion and temporary hosting automatically.

-* **[AiForce](AiForce/README.md):** Advanced AI image generation with 12 specialized models including Flux-1.1-Pro, SDXL Lightning, and Ideogram, perfect for both quick generations and high-quality art
-* **[Nexra](Nexra/README.md):** Next-gen image creation with 19+ models including MidJourney, DALL-E, and specialized SDXL variants for every use case from anime to photorealism
-* **[TalkAI](talkai/README.md):** Fast and reliable image generation with comprehensive error handling and dynamic user agent support
-* **[PollinationsAI](PollinationsAI/README.md):** Nature-inspired AI art generation with specialized models for organic and natural imagery
-* **[Artbit](artbit/README.md):** Bit-perfect AI art creation with precise control over generation parameters
-* **[HuggingFace](huggingface/README.md):** Direct integration with HuggingFace's powerful models for research-grade image generation
-* **[FreeAIPlayground](FreeAIPlayground/README.md):** Premium image generation with DALL-E 3 and Flux series models including Pro Ultra, Realism, and Aurora variants
-* **[PiclumenImager](piclumen/README.md):** Professional photorealistic image generation with advanced concurrent processing, optimized for macro photography, underwater scenes, and architectural visualization
-* **[MagicStudio](MagicStudio/README.md):** Generate amazing images with MagicStudio's AI art generator! 🚀
-* **[FastFlux](fastflux/README.md):** Generate amazing images with FastFlux's AI art generator! 🚀
-* **[ImgSys](ImgSys/README.md):** Multi-provider image generation that creates 2 unique images from random AI models for each prompt, with comprehensive error handling and async support 🎨
-* **[PixelMuse](pixelmuse/README.md):** Create stunning AI art with PixelMuse's powerful models including flux-schnell, imagen-3, and recraft-v3, featuring both sync and async support for optimal performance 🎨
+## Features

-## 🚀 Features
+- **Unified API** – Consistent `images.create()` method for all providers
+- **Multiple Providers** – Generate images using different third‑party services
+- **URL or Base64 Output** – Receive image URLs (uploaded to catbox.moe/0x0.st) or base64 encoded bytes
+- **PNG/JPEG Conversion** – Images are converted in memory to your chosen format
+- **Model Listing** – Query available models with `provider.models.list()`

-All providers come with these fire features:
+## 📦 Supported Providers

-### 🛠️ Core Features
+| Provider | Available Models (examples) |
+|-----------------|----------------------------------------------------|
+| `AIArta` | `flux`, `medieval`, `dreamshaper_xl`, ... |
+| `FastFluxAI` | `flux_1_schnell` |
+| `MagicStudioAI` | `magicstudio` |
+| `PixelMuse` | `flux-schnell`, `imagen-3`, `recraft-v3` |
+| `PiclumenAI` | `piclumen-v1` |
+| `PollinationsAI`| `flux`, `turbo`, `gptimage` |

-* Both sync and async implementations
+> **Note**: Some providers require the `Pillow` package for image processing.

-* Comprehensive error handling
-* Optional logging with cyberpunk theme
-* Dynamic user agent generation
-* Proxy support
-* Customizable timeouts
-* Smart retry mechanisms
-
-### 💫 Developer Experience
-
-* Consistent API across all providers
-
-* Detailed documentation with examples
-* Type hints for better IDE support
-* Comprehensive error messages
-* Easy-to-use interface
-
-### 🔒 Security Features
-
-* Proxy support for privacy
-
-* Configurable timeouts
-* Safe error handling
-* Optional verbose logging
-* Dynamic user agent rotation
-
-## 🎯 Usage Example
+## 🚀 Quick Start

 ```python
-# Sync way
-from webscout.Provider.TTI import BlackboxAIImager
-
-imager = BlackboxAIImager()
-images = imager.generate("Epic dragon breathing fire", amount=2)
-paths = imager.save(images)
+from webscout.Provider.TTI import PixelMuse

-# Async way
-from webscout.Provider.TTI import AsyncDeepInfraImager
-import asyncio
+# Initialize the provider
+client = PixelMuse()

-async def generate_art():
-    imager = AsyncDeepInfraImager()
-    images = await imager.generate("Cyberpunk city at night")
-    paths = await imager.save(images)
+# Generate two images and get URLs
+response = client.images.create(
+    model="flux-schnell",
+    prompt="A futuristic city skyline at sunset",
+    n=2,
+    response_format="url"
+)

-asyncio.run(generate_art())
+print(response)
 ```

-## 🔧 Installation
+### Accessing Models

-```bash
-pip install webscout
-```
+Each provider exposes the models it supports:

-## 📚 Common Interface
+```python
+from webscout.Provider.TTI import AIArta

-All providers implement these base classes:
+ai = AIArta()
+print(ai.models.list())  # List model identifiers
+```

-* `ImageProvider` for sync operations
-* `AsyncImageProvider` for async operations
+### Base64 Output

-### 🎨 Common Methods
+If you prefer the raw image data:

 ```python
-def generate(
-    self,
-    prompt: str,            # Your creative prompt
-    amount: int = 1,        # Number of images
-    max_retries: int = 3,   # Max retry attempts
-    retry_delay: int = 5    # Delay between retries
-) -> List[bytes]:           # Returns image data
-    ...
-
-def save(
-    self,
-    response: List[bytes],  # Image data
-    name: str = None,       # Base filename
-    dir: str = os.getcwd(), # Save directory
-    prefix: str = ""        # Filename prefix
-) -> List[str]:             # Returns saved paths
-    ...
+response = client.images.create(
+    model="flux-schnell",
+    prompt="Crystal mountain landscape",
+    response_format="b64_json"
+)
+# `response.data` will contain base64 strings
 ```

-## 🛡️ Error Handling
-
-All providers use these standard exceptions:
-
-* `APIConnectionError`: Network/connection issues
-* `InvalidResponseError`: Invalid API responses
-* `FailedToGenerateResponseError`: Generation failures
+## 🔧 Provider Specifics

-## 🎛️ Configuration
+- **AIArta** – Uses Firebase authentication tokens and supports many tattoo‑style models.
+- **FastFluxAI** – Simple API for quick image generation.
+- **MagicStudioAI** – Generates images through MagicStudio's public endpoint.
+- **PixelMuse** – Supports several models and converts images from WebP.
+- **PiclumenAI** – Returns JPEG images directly from the API.
+- **PollinationsAI** – Allows setting a custom seed for reproducible results.

-Common configuration options:
+## 🤝 Contributing

-```python
-imager = Provider(
-    timeout=60,   # Request timeout
-    proxies={},   # Proxy settings
-
-)
-```
+Contributions and additional providers are welcome! Feel free to submit a pull request.
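The Quick Start and Base64 sections of the new README return either URLs or base64 strings but leave decoding to the caller. A small, hedged sketch for writing `b64_json` results to disk, assuming each item in `response.data` is an `ImageData` with an optional `b64_json` attribute (as in the new `aiarta.py` below); `save_b64_images` is an illustrative helper, not part of webscout:

```python
import base64

def save_b64_images(response, prefix="image"):
    """Decode base64 image payloads from an ImageResponse and write PNG files."""
    paths = []
    for i, item in enumerate(response.data):
        if not getattr(item, "b64_json", None):
            continue  # skip URL-only entries
        path = f"{prefix}_{i}.png"
        with open(path, "wb") as fh:
            fh.write(base64.b64decode(item.b64_json))
        paths.append(path)
    return paths
```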
webscout/Provider/TTI/__init__.py

@@ -1,12 +1,7 @@
-from .FreeAIPlayground import *
-from .PollinationsAI import *
-from .AiForce import *
-from .Nexra import *
-from .huggingface import *
-from .artbit import *
-from .talkai import *
+
+from .pollinations import *
 from .piclumen import *
-from .MagicStudio import *
+from .magicstudio import *
 from .fastflux import *
 from .pixelmuse import *
-from .ImgSys import *
+from .aiarta import *
webscout/Provider/TTI/aiarta.py (new file)

@@ -0,0 +1,365 @@
+"""AIArtaImager TTI-Compatible Provider - Generate stunning AI art with AI Arta! 🎨
+
+Examples:
+    >>> from webscout.Provider.TTI.aiarta import AIArta
+    >>> client = AIArta()
+    >>> response = client.images.create(
+    ...     model="flux",
+    ...     prompt="A cool cyberpunk city at night",
+    ...     n=1
+    ... )
+    >>> print(response)
+"""
+
+import requests
+from typing import Optional, List, Dict, Any
+from webscout.Provider.TTI.utils import ImageData, ImageResponse
+from webscout.Provider.TTI.base import TTICompatibleProvider, BaseImages
+from io import BytesIO
+import os
+import tempfile
+from webscout.litagent import LitAgent
+import time
+import json
+
+try:
+    from PIL import Image
+except ImportError:
+    Image = None
+
+class Images(BaseImages):
+    def __init__(self, client: 'AIArta'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        prompt: str,
+        n: int = 1,
+        size: str = "1024x1024",
+        response_format: str = "url",
+        user: Optional[str] = None,
+        style: str = "none",
+        aspect_ratio: str = "1:1",
+        timeout: int = 60,
+        image_format: str = "png",
+        **kwargs
+    ) -> ImageResponse:
+        """
+        image_format: "png" or "jpeg"
+        """
+        if Image is None:
+            raise ImportError("Pillow (PIL) is required for image format conversion.")
+
+        images = []
+        urls = []
+        agent = LitAgent()
+
+        def upload_file_with_retry(img_bytes, image_format, max_retries=3):
+            ext = "jpg" if image_format.lower() == "jpeg" else "png"
+            for attempt in range(max_retries):
+                tmp_path = None
+                try:
+                    with tempfile.NamedTemporaryFile(suffix=f".{ext}", delete=False) as tmp:
+                        tmp.write(img_bytes)
+                        tmp.flush()
+                        tmp_path = tmp.name
+                    with open(tmp_path, 'rb') as f:
+                        files = {
+                            'fileToUpload': (f'image.{ext}', f, f'image/{ext}')
+                        }
+                        data = {
+                            'reqtype': 'fileupload',
+                            'json': 'true'
+                        }
+                        headers = {'User-Agent': agent.random()}
+                        if attempt > 0:
+                            headers['Connection'] = 'close'
+                        resp = requests.post("https://catbox.moe/user/api.php", files=files, data=data, headers=headers, timeout=timeout)
+                        if resp.status_code == 200 and resp.text.strip():
+                            text = resp.text.strip()
+                            if text.startswith('http'):
+                                return text
+                            try:
+                                result = resp.json()
+                                if "url" in result:
+                                    return result["url"]
+                            except json.JSONDecodeError:
+                                if 'http' in text:
+                                    return text
+                except Exception:
+                    if attempt < max_retries - 1:
+                        time.sleep(1 * (attempt + 1))
+                finally:
+                    if tmp_path and os.path.isfile(tmp_path):
+                        try:
+                            os.remove(tmp_path)
+                        except Exception:
+                            pass
+            return None
+
+        def upload_file_alternative(img_bytes, image_format):
+            try:
+                ext = "jpg" if image_format.lower() == "jpeg" else "png"
+                with tempfile.NamedTemporaryFile(suffix=f".{ext}", delete=False) as tmp:
+                    tmp.write(img_bytes)
+                    tmp.flush()
+                    tmp_path = tmp.name
+                try:
+                    if not os.path.isfile(tmp_path):
+                        return None
+                    with open(tmp_path, 'rb') as img_file:
+                        files = {'file': img_file}
+                        response = requests.post('https://0x0.st', files=files)
+                        response.raise_for_status()
+                        image_url = response.text.strip()
+                        if not image_url.startswith('http'):
+                            return None
+                        return image_url
+                except Exception:
+                    return None
+                finally:
+                    try:
+                        os.remove(tmp_path)
+                    except Exception:
+                        pass
+            except Exception:
+                return None
+
+        for _ in range(n):
+            # Step 1: Get Authentication Token
+            auth_data = self._client.read_and_refresh_token()
+            gen_headers = {
+                "Authorization": auth_data.get("idToken"),
+            }
+            # Remove content-type header for form data
+            if "content-type" in self._client.session.headers:
+                del self._client.session.headers["content-type"]
+            # get_model now returns the proper style name from model_aliases
+            style_value = self._client.get_model(model)
+            image_payload = {
+                "prompt": str(prompt),
+                "negative_prompt": str(kwargs.get("negative_prompt", "blurry, deformed hands, ugly")),
+                "style": str(style_value),
+                "images_num": str(1),  # Generate one image at a time in the loop
+                "cfg_scale": str(kwargs.get("guidance_scale", 7)),
+                "steps": str(kwargs.get("num_inference_steps", 30)),
+                "aspect_ratio": str(aspect_ratio),
+            }
+            # Step 2: Generate Image (send as form data, not JSON)
+            image_response = self._client.session.post(
+                self._client.image_generation_url,
+                data=image_payload,  # Use form data instead of JSON
+                headers=gen_headers,
+                timeout=timeout
+            )
+            if image_response.status_code != 200:
+                raise RuntimeError(f"AIArta API error {image_response.status_code}: {image_response.text}\nPayload: {image_payload}")
+            image_data = image_response.json()
+            record_id = image_data.get("record_id")
+            if not record_id:
+                raise RuntimeError(f"Failed to initiate image generation: {image_data}")
+            # Step 3: Check Generation Status
+            status_url = self._client.status_check_url.format(record_id=record_id)
+            while True:
+                status_response = self._client.session.get(
+                    status_url,
+                    headers=gen_headers,
+                    timeout=timeout
+                )
+                status_data = status_response.json()
+                status = status_data.get("status")
+                if status == "DONE":
+                    image_urls = [image["url"] for image in status_data.get("response", [])]
+                    if not image_urls:
+                        raise RuntimeError("No image URLs returned from AIArta")
+                    img_resp = self._client.session.get(image_urls[0], timeout=timeout)
+                    img_resp.raise_for_status()
+                    img_bytes = img_resp.content
+                    # Convert to png or jpeg in memory
+                    with BytesIO(img_bytes) as input_io:
+                        with Image.open(input_io) as im:
+                            out_io = BytesIO()
+                            if image_format.lower() == "jpeg":
+                                im = im.convert("RGB")
+                                im.save(out_io, format="JPEG")
+                            else:
+                                im.save(out_io, format="PNG")
+                            img_bytes = out_io.getvalue()
+                    images.append(img_bytes)
+                    if response_format == "url":
+                        uploaded_url = upload_file_with_retry(img_bytes, image_format)
+                        if not uploaded_url:
+                            uploaded_url = upload_file_alternative(img_bytes, image_format)
+                        if uploaded_url:
+                            urls.append(uploaded_url)
+                        else:
+                            raise RuntimeError("Failed to upload image to catbox.moe using all available methods")
+                    break
+                elif status in ("IN_QUEUE", "IN_PROGRESS"):
+                    time.sleep(2)
+                else:
+                    raise RuntimeError(f"Image generation failed with status: {status}")
+
+        result_data = []
+        if response_format == "url":
+            for url in urls:
+                result_data.append(ImageData(url=url))
+        elif response_format == "b64_json":
+            import base64
+            for img in images:
+                b64 = base64.b64encode(img).decode("utf-8")
+                result_data.append(ImageData(b64_json=b64))
+        else:
+            raise ValueError("response_format must be 'url' or 'b64_json'")
+
+        from time import time as _time
+        return ImageResponse(
+            created=int(_time()),
+            data=result_data
+        )
+
+class AIArta(TTICompatibleProvider):
+    # Model aliases mapping from lowercase keys to proper API style names
+    model_aliases = {
+        "flux": "Flux",
+        "medieval": "Medieval",
+        "vincent_van_gogh": "Vincent Van Gogh",
+        "f_dev": "F Dev",
+        "low_poly": "Low Poly",
+        "dreamshaper_xl": "Dreamshaper-xl",
+        "anima_pencil_xl": "Anima-pencil-xl",
+        "biomech": "Biomech",
+        "trash_polka": "Trash Polka",
+        "no_style": "No Style",
+        "cheyenne_xl": "Cheyenne-xl",
+        "chicano": "Chicano",
+        "embroidery_tattoo": "Embroidery tattoo",
+        "red_and_black": "Red and Black",
+        "fantasy_art": "Fantasy Art",
+        "watercolor": "Watercolor",
+        "dotwork": "Dotwork",
+        "old_school_colored": "Old school colored",
+        "realistic_tattoo": "Realistic tattoo",
+        "japanese_2": "Japanese_2",
+        "realistic_stock_xl": "Realistic-stock-xl",
+        "f_pro": "F Pro",
+        "revanimated": "RevAnimated",
+        "katayama_mix_xl": "Katayama-mix-xl",
+        "sdxl_l": "SDXL L",
+        "cor_epica_xl": "Cor-epica-xl",
+        "anime_tattoo": "Anime tattoo",
+        "new_school": "New School",
+        "death_metal": "Death metal",
+        "old_school": "Old School",
+        "juggernaut_xl": "Juggernaut-xl",
+        "photographic": "Photographic",
+        "sdxl_1_0": "SDXL 1.0",
+        "graffiti": "Graffiti",
+        "mini_tattoo": "Mini tattoo",
+        "surrealism": "Surrealism",
+        "neo_traditional": "Neo-traditional",
+        "on_limbs_black": "On limbs black",
+        "yamers_realistic_xl": "Yamers-realistic-xl",
+        "pony_xl": "Pony-xl",
+        "playground_xl": "Playground-xl",
+        "anything_xl": "Anything-xl",
+        "flame_design": "Flame design",
+        "kawaii": "Kawaii",
+        "cinematic_art": "Cinematic Art",
+        "professional": "Professional",
+        "black_ink": "Black Ink"
+    }
+
+    AVAILABLE_MODELS = list(model_aliases.keys())
+    default_model = "Flux"
+    default_image_model = default_model
+
+    def __init__(self):
+        self.image_generation_url = "https://img-gen-prod.ai-arta.com/api/v1/text2image"
+        self.status_check_url = "https://img-gen-prod.ai-arta.com/api/v1/text2image/{record_id}/status"
+        self.auth_url = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/signupNewUser?key=AIzaSyB3-71wG0fIt0shj0ee4fvx1shcjJHGrrQ"
+        self.token_refresh_url = "https://securetoken.googleapis.com/v1/token?key=AIzaSyB3-71wG0fIt0shj0ee4fvx1shcjJHGrrQ"
+        self.session = requests.Session()
+        self.user_agent = LitAgent().random()
+        self.headers = {
+            "accept": "application/json",
+            "accept-language": "en-US,en;q=0.9",
+            "origin": "https://img-gen-prod.ai-arta.com",
+            "referer": "https://img-gen-prod.ai-arta.com/",
+            "user-agent": self.user_agent,
+        }
+        self.session.headers.update(self.headers)
+        self.images = Images(self)
+
+    def get_auth_file(self) -> str:
+        path = os.path.join(os.path.expanduser("~"), ".ai_arta_cookies")
+        if not os.path.exists(path):
+            os.makedirs(path)
+        filename = f"auth_{self.__class__.__name__}.json"
+        return os.path.join(path, filename)
+
+    def create_token(self, path: str) -> Dict[str, Any]:
+        auth_payload = {"clientType": "CLIENT_TYPE_ANDROID"}
+        proxies = self.session.proxies if self.session.proxies else None
+        auth_response = self.session.post(self.auth_url, json=auth_payload, timeout=60, proxies=proxies)
+        auth_data = auth_response.json()
+        auth_token = auth_data.get("idToken")
+        if not auth_token:
+            raise Exception("Failed to obtain authentication token.")
+        with open(path, 'w') as f:
+            json.dump(auth_data, f)
+        return auth_data
+
+    def refresh_token(self, refresh_token: str) -> tuple[str, str]:
+        payload = {
+            "grant_type": "refresh_token",
+            "refresh_token": refresh_token,
+        }
+        response = self.session.post(self.token_refresh_url, data=payload, timeout=60)
+        response_data = response.json()
+        return response_data.get("id_token"), response_data.get("refresh_token")
+
+    def read_and_refresh_token(self) -> Dict[str, Any]:
+        path = self.get_auth_file()
+        if os.path.isfile(path):
+            with open(path, 'r') as f:
+                auth_data = json.load(f)
+            diff = time.time() - os.path.getmtime(path)
+            expires_in = int(auth_data.get("expiresIn", 3600))
+            if diff < expires_in:
+                if diff > expires_in / 2:
+                    auth_data["idToken"], auth_data["refreshToken"] = self.refresh_token(
+                        auth_data.get("refreshToken")
+                    )
+                    with open(path, 'w') as f:
+                        json.dump(auth_data, f)
+                return auth_data
+        return self.create_token(path)
+
+    def get_model(self, model_name: str) -> str:
+        # Convert to lowercase for lookup
+        model_key = model_name.lower()
+        # Return the proper style name from model_aliases, or the original if not found
+        return self.model_aliases.get(model_key, model_name)
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
+
+# Example usage:
+if __name__ == "__main__":
+    from rich import print
+    client = AIArta()
+    response = client.images.create(
+        model="flux",
+        prompt="a white siamese cat",
+        response_format="url",
+        n=2,
+        timeout=30,
+    )
+    print(response)