webscout-8.3.2-py3-none-any.whl → webscout-8.3.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout has been flagged; see the registry listing for details.
- webscout/AIutel.py +146 -37
- webscout/Bing_search.py +1 -2
- webscout/Provider/AISEARCH/__init__.py +1 -0
- webscout/Provider/AISEARCH/stellar_search.py +132 -0
- webscout/Provider/ExaChat.py +84 -58
- webscout/Provider/HeckAI.py +85 -80
- webscout/Provider/Jadve.py +56 -50
- webscout/Provider/MiniMax.py +207 -0
- webscout/Provider/Nemotron.py +41 -13
- webscout/Provider/Netwrck.py +34 -51
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1
- webscout/Provider/OPENAI/MiniMax.py +298 -0
- webscout/Provider/OPENAI/README.md +30 -29
- webscout/Provider/OPENAI/TogetherAI.py +4 -17
- webscout/Provider/OPENAI/__init__.py +3 -1
- webscout/Provider/OPENAI/autoproxy.py +752 -17
- webscout/Provider/OPENAI/base.py +7 -76
- webscout/Provider/OPENAI/deepinfra.py +42 -108
- webscout/Provider/OPENAI/flowith.py +179 -166
- webscout/Provider/OPENAI/friendli.py +233 -0
- webscout/Provider/OPENAI/monochat.py +329 -0
- webscout/Provider/OPENAI/pydantic_imports.py +1 -172
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/OPENAI/typegpt.py +1 -1
- webscout/Provider/OPENAI/utils.py +19 -42
- webscout/Provider/OPENAI/x0gpt.py +14 -2
- webscout/Provider/OpenGPT.py +54 -32
- webscout/Provider/PI.py +58 -84
- webscout/Provider/StandardInput.py +32 -13
- webscout/Provider/TTI/README.md +9 -9
- webscout/Provider/TTI/__init__.py +2 -1
- webscout/Provider/TTI/aiarta.py +92 -78
- webscout/Provider/TTI/infip.py +212 -0
- webscout/Provider/TTI/monochat.py +220 -0
- webscout/Provider/TeachAnything.py +11 -3
- webscout/Provider/TextPollinationsAI.py +78 -70
- webscout/Provider/TogetherAI.py +32 -48
- webscout/Provider/Venice.py +37 -46
- webscout/Provider/VercelAI.py +27 -24
- webscout/Provider/WiseCat.py +35 -35
- webscout/Provider/WrDoChat.py +22 -26
- webscout/Provider/WritingMate.py +26 -22
- webscout/Provider/__init__.py +2 -2
- webscout/Provider/granite.py +48 -57
- webscout/Provider/koala.py +51 -39
- webscout/Provider/learnfastai.py +49 -64
- webscout/Provider/llmchat.py +79 -93
- webscout/Provider/llmchatco.py +63 -78
- webscout/Provider/multichat.py +51 -40
- webscout/Provider/oivscode.py +1 -1
- webscout/Provider/scira_chat.py +159 -96
- webscout/Provider/scnet.py +13 -13
- webscout/Provider/searchchat.py +13 -13
- webscout/Provider/sonus.py +12 -11
- webscout/Provider/toolbaz.py +25 -8
- webscout/Provider/turboseek.py +41 -42
- webscout/Provider/typefully.py +27 -12
- webscout/Provider/typegpt.py +41 -46
- webscout/Provider/uncovr.py +55 -90
- webscout/Provider/x0gpt.py +33 -17
- webscout/Provider/yep.py +79 -96
- webscout/auth/__init__.py +12 -1
- webscout/auth/providers.py +27 -5
- webscout/auth/routes.py +128 -104
- webscout/auth/server.py +367 -312
- webscout/client.py +121 -116
- webscout/litagent/Readme.md +68 -55
- webscout/litagent/agent.py +99 -9
- webscout/version.py +1 -1
- {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/METADATA +102 -90
- {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/RECORD +75 -87
- webscout/Provider/TTI/fastflux.py +0 -233
- webscout/Provider/Writecream.py +0 -246
- webscout/auth/static/favicon.svg +0 -11
- webscout/auth/swagger_ui.py +0 -203
- webscout/auth/templates/components/authentication.html +0 -237
- webscout/auth/templates/components/base.html +0 -103
- webscout/auth/templates/components/endpoints.html +0 -750
- webscout/auth/templates/components/examples.html +0 -491
- webscout/auth/templates/components/footer.html +0 -75
- webscout/auth/templates/components/header.html +0 -27
- webscout/auth/templates/components/models.html +0 -286
- webscout/auth/templates/components/navigation.html +0 -70
- webscout/auth/templates/static/api.js +0 -455
- webscout/auth/templates/static/icons.js +0 -168
- webscout/auth/templates/static/main.js +0 -784
- webscout/auth/templates/static/particles.js +0 -201
- webscout/auth/templates/static/styles.css +0 -3353
- webscout/auth/templates/static/ui.js +0 -374
- webscout/auth/templates/swagger_ui.html +0 -170
- {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/WHEEL +0 -0
- {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/top_level.txt +0 -0
webscout/Provider/TTI/aiarta.py
CHANGED
```diff
@@ -24,6 +24,8 @@ import tempfile
 from webscout.litagent import LitAgent
 import time
 import json
+import random
+from pathlib import Path
 
 try:
     from PIL import Image
@@ -31,6 +33,10 @@ except ImportError:
     Image = None
 
 
+class ModelNotFoundError(Exception):
+    pass
+
+
 class Images(BaseImages):
     def __init__(self, client: "AIArta"):
         self._client = client
@@ -140,22 +146,23 @@ class Images(BaseImages):
         gen_headers = {
             "Authorization": auth_data.get("idToken"),
         }
-        # Remove content-type header for form data
         if "content-type" in self._client.session.headers:
             del self._client.session.headers["content-type"]
-        #
-        style_value = self._client.get_model(model)
+        # Use the model name directly, not as 'style'
         image_payload = {
             "prompt": str(prompt),
             "negative_prompt": str(
                 kwargs.get("negative_prompt", "blurry, deformed hands, ugly")
             ),
-            "style": str(
-            "images_num": str(1),
+            "style": str(model),  # Use 'style' key for the model name
+            "images_num": str(1),
             "cfg_scale": str(kwargs.get("guidance_scale", 7)),
             "steps": str(kwargs.get("num_inference_steps", 30)),
             "aspect_ratio": str(aspect_ratio),
         }
+        # Remove 'model' from payload if present
+        if "model" in image_payload:
+            del image_payload["model"]
         # Step 2: Generate Image (send as form data, not JSON)
         image_response = self._client.session.post(
             self._client.image_generation_url,
@@ -241,68 +248,67 @@ class Images(BaseImages):
 
 
 class AIArta(TTICompatibleProvider):
-    ... (54 removed lines; their content is truncated in the source diff)
+    url = "https://ai-arta.com"
+    auth_url = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/signupNewUser?key=AIzaSyB3-71wG0fIt0shj0ee4fvx1shcjJHGrrQ"
+    token_refresh_url = "https://securetoken.googleapis.com/v1/token?key=AIzaSyB3-71wG0fIt0shj0ee4fvx1shcjJHGrrQ"
+    image_generation_url = "https://img-gen-prod.ai-arta.com/api/v1/text2image"
+    status_check_url = "https://img-gen-prod.ai-arta.com/api/v1/text2image/{record_id}/status"
+    AVAILABLE_MODELS = [
+        "Anything-xl",
+        "High GPT4o",
+        "On limbs black",
+        "F Dev",
+        "SDXL 1.0",
+        "Old School",
+        "Vincent Van Gogh",
+        "Cor-epica-xl",
+        "Professional",
+        "Cheyenne-xl",
+        "Chicano",
+        "SDXL L",
+        "Black Ink",
+        "Juggernaut-xl",
+        "Cinematic Art",
+        "Dreamshaper-xl",
+        "Fantasy Art",
+        "Neo-traditional",
+        "Realistic-stock-xl",
+        "Flame design",
+        "Japanese_2",
+        "Medieval",
+        "Surrealism",
+        "Dotwork",
+        "Graffiti",
+        "RevAnimated",
+        "On limbs color",
+        "Old school colored",
+        "GPT4o Ghibli",
+        "Low Poly",
+        "GPT4o",
+        "No Style",
+        "Anime",
+        "tattoo",
+        "Embroidery tattoo",
+        "Mini tattoo",
+        "Realistic tattoo",
+        "Playground-xl",
+        "Watercolor",
+        "F Pro",
+        "Kawaii",
+        "Photographic",
+        "Katayama-mix-xl",
+        "Death metal",
+        "New School",
+        "Pony-xl",
+        "Anima-pencil-xl",
+        "Flux",
+        "Biomech",
+        "Yamers-realistic-xl",
+        "Trash Polka",
+        "Red and Black",
+    ]
 
     def __init__(self):
-        self.image_generation_url = "https://img-gen-prod.ai-arta.com/api/v1/text2image"
-        self.status_check_url = (
-            "https://img-gen-prod.ai-arta.com/api/v1/text2image/{record_id}/status"
-        )
-        self.auth_url = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/signupNewUser?key=AIzaSyB3-71wG0fIt0shj0ee4fvx1shcjJHGrrQ"
-        self.token_refresh_url = "https://securetoken.googleapis.com/v1/token?key=AIzaSyB3-71wG0fIt0shj0ee4fvx1shcjJHGrrQ"
         self.session = requests.Session()
         self.user_agent = LitAgent().random()
         self.headers = {
@@ -316,11 +322,11 @@ class AIArta(TTICompatibleProvider):
         self.images = Images(self)
 
     def get_auth_file(self) -> str:
-
-
-            os.makedirs(path)
+        import tempfile
+        # Use a temp file in the system's temp directory, unique per class
         filename = f"auth_{self.__class__.__name__}.json"
-
+        temp_dir = tempfile.gettempdir()
+        return os.path.join(temp_dir, filename)
 
     def create_token(self, path: str) -> Dict[str, Any]:
         auth_payload = {"clientType": "CLIENT_TYPE_ANDROID"}
@@ -369,17 +375,25 @@ class AIArta(TTICompatibleProvider):
             return auth_data
         return self.create_token(path)
 
-    def get_model(self,
-
-
-
-
+    def get_model(self, model: str) -> str:
+        if not model:
+            return self.default_model
+        if model in self.models:
+            return model
+        raise ModelNotFoundError(f"Model {model} not found")
 
     @property
     def models(self):
         class _ModelList:
             def list(inner_self):
                 return type(self).AVAILABLE_MODELS
+        return _ModelList()
+
+    @property
+    def models_list(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).models
 
         return _ModelList()
 
@@ -390,10 +404,10 @@ if __name__ == "__main__":
 
     client = AIArta()
     response = client.images.create(
-        model="
-        prompt="
+        model="GPT4o",
+        prompt="Chitt Robot saying 'Hello World'",
         response_format="url",
-        n=
-        timeout=
+        n=1,
+        timeout=3000,
    )
    print(response)
```
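The refactor above exposes the endpoint URLs and AVAILABLE_MODELS as class attributes, adds a module-level ModelNotFoundError, and gives get_model basic validation. A minimal usage sketch based only on what the hunks show; the import path mirrors the file location and is an assumption rather than a documented API:

```python
from webscout.Provider.TTI.aiarta import AIArta, ModelNotFoundError  # path assumed from the file layout

client = AIArta()
available = client.models.list()      # backed by the new class-level AVAILABLE_MODELS
print(len(available), available[:3])

requested = "GPT4o"
if requested not in available:        # mirrors the check get_model() is meant to perform
    raise ModelNotFoundError(f"Model {requested} not found")
```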
webscout/Provider/TTI/infip.py
ADDED
@@ -0,0 +1,212 @@
```python
"""InfipAI TTI-Compatible Provider - Generate images with Infip AI! 🎨

This module provides access to Infip's image generation API through a unified interface.
Supports img3, img4, and uncen models with various aspect ratios and customization options.

Example Usage:
    from webscout.Provider.TTI.infip import InfipAI

    # Initialize the provider
    client = InfipAI()

    # Generate an image
    response = client.images.create(
        model="img3",
        prompt="A beautiful sunset over mountains",
        n=1,
        aspect_ratio="IMAGE_ASPECT_RATIO_LANDSCAPE",
        seed=42
    )

    # Get the image URL
    image_url = response.data[0].url
    print(f"Generated image: {image_url}")

Available Models:
    - img3: High-quality image generation
    - img4: Enhanced image generation model
    - uncen: Uncensored image generation model

Supported Aspect Ratios:
    - IMAGE_ASPECT_RATIO_LANDSCAPE: 16:9 landscape
    - IMAGE_ASPECT_RATIO_PORTRAIT: 9:16 portrait
    - IMAGE_ASPECT_RATIO_SQUARE: 1:1 square
"""

import requests
from typing import Optional
from webscout.Provider.TTI.utils import (
    ImageData,
    ImageResponse
)
from webscout.Provider.TTI.base import TTICompatibleProvider, BaseImages
from webscout.litagent import LitAgent
import time


class Images(BaseImages):
    def __init__(self, client):
        self._client = client

    def create(
        self,
        *,
        model: str,
        prompt: str,
        n: int = 1,
        size: str = "1024x1024",
        response_format: str = "url",
        user: Optional[str] = None,
        style: str = "none",
        aspect_ratio: str = "IMAGE_ASPECT_RATIO_LANDSCAPE",
        timeout: int = 60,
        image_format: str = "png",
        seed: Optional[int] = None,
        **kwargs,
    ) -> ImageResponse:
        """
        Create images using Infip AI API.

        Args:
            model: The model to use ("img3", "img4", or "uncen")
            prompt: Text description of the image to generate
            n: Number of images to generate (default: 1)
            size: Image size (ignored, aspect_ratio is used instead)
            response_format: "url" or "b64_json" (default: "url")
            user: Optional user identifier (ignored)
            style: Optional style (ignored)
            aspect_ratio: Image aspect ratio ("IMAGE_ASPECT_RATIO_LANDSCAPE",
                "IMAGE_ASPECT_RATIO_PORTRAIT", "IMAGE_ASPECT_RATIO_SQUARE")
            timeout: Request timeout in seconds (default: 60)
            image_format: Image format "png" or "jpeg" (ignored by API)
            seed: Random seed for reproducibility (default: 0 for random)
            **kwargs: Additional parameters

        Returns:
            ImageResponse: The generated images

        Raises:
            ValueError: If model is not supported
            RuntimeError: If image generation fails
        """
        if model not in self._client.AVAILABLE_MODELS:
            raise ValueError(f"Model '{model}' not supported. Available models: {self._client.AVAILABLE_MODELS}")

        # Validate aspect ratio
        valid_ratios = [
            "IMAGE_ASPECT_RATIO_LANDSCAPE",
            "IMAGE_ASPECT_RATIO_PORTRAIT",
            "IMAGE_ASPECT_RATIO_SQUARE"
        ]
        if aspect_ratio not in valid_ratios:
            aspect_ratio = "IMAGE_ASPECT_RATIO_LANDSCAPE"

        # Prepare request payload
        payload = {
            "prompt": prompt,
            "num_images": n,
            "seed": seed if seed is not None else 0,
            "aspect_ratio": aspect_ratio,
            "models": model
        }

        try:
            # Make API request
            response = self._client.session.post(
                self._client.api_endpoint,
                json=payload,
                timeout=timeout
            )
            response.raise_for_status()

            # Parse response
            result = response.json()

            if "images" not in result or not result["images"]:
                raise RuntimeError("No images returned from Infip API")

            # Process response based on format
            result_data = []

            if response_format == "url":
                for image_url in result["images"]:
                    result_data.append(ImageData(url=image_url))
            elif response_format == "b64_json":
                # For b64_json format, we need to download and encode the images
                import base64
                for image_url in result["images"]:
                    try:
                        img_response = self._client.session.get(image_url, timeout=timeout)
                        img_response.raise_for_status()
                        b64_data = base64.b64encode(img_response.content).decode('utf-8')
                        result_data.append(ImageData(b64_json=b64_data))
                    except Exception as e:
                        raise RuntimeError(f"Failed to download image for base64 encoding: {e}")
            else:
                raise ValueError("response_format must be 'url' or 'b64_json'")

            return ImageResponse(created=int(time.time()), data=result_data)

        except requests.RequestException as e:
            raise RuntimeError(f"Failed to generate image with Infip API: {e}")
        except Exception as e:
            raise RuntimeError(f"Unexpected error during image generation: {e}")


class InfipAI(TTICompatibleProvider):
    """
    Infip AI provider for text-to-image generation.

    This provider interfaces with the Infip API to generate images from text prompts.
    It supports multiple models and aspect ratios for flexible image creation.
    """

    AVAILABLE_MODELS = ["img3", "img4", "uncen"]

    def __init__(self, **kwargs):
        """
        Initialize the Infip AI provider.

        Args:
            **kwargs: Additional configuration options
        """
        self.api_endpoint = "https://api.infip.pro/generate"
        self.session = requests.Session()

        # Set up headers with user agent
        agent = LitAgent()
        self.headers = {
            "accept": "application/json",
            "Content-Type": "application/json",
            "User-Agent": agent.random()
        }
        self.session.headers.update(self.headers)

        # Initialize the images interface
        self.images = Images(self)

    @property
    def models(self):
        """
        Get available models for the provider.

        Returns:
            Object with list() method that returns available model names
        """
        class ModelList:
            def list(self):
                return InfipAI.AVAILABLE_MODELS

        return ModelList()


if __name__ == "__main__":
    client = InfipAI()
    response = client.images.create(
        model="img3",
        prompt="A beautiful sunset over mountains",
        n=1,
        aspect_ratio="IMAGE_ASPECT_RATIO_LANDSCAPE",
        seed=42
    )
    print(response.data[0].url)
```
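The docstring example above returns a hosted URL; create() also accepts response_format="b64_json", in which case the provider downloads each image and base64-encodes it into ImageData.b64_json. A small sketch, assuming the module path shown in the docstring, of writing that payload to disk (the output filename is illustrative):

```python
import base64

from webscout.Provider.TTI.infip import InfipAI

client = InfipAI()
response = client.images.create(
    model="img4",
    prompt="A lighthouse at dusk",
    response_format="b64_json",  # provider fetches the image and base64-encodes it
)

# Decode the first image and save it locally (extension is illustrative)
with open("lighthouse.png", "wb") as fh:
    fh.write(base64.b64decode(response.data[0].b64_json))
```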
webscout/Provider/TTI/monochat.py
ADDED
@@ -0,0 +1,220 @@
```python
import requests
import base64
from typing import Optional, List, Dict, Any
from webscout.Provider.TTI.utils import (
    ImageData,
    ImageResponse
)
from webscout.Provider.TTI.base import TTICompatibleProvider, BaseImages
from webscout.litagent import LitAgent
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
import os
import tempfile
import time
import json
from io import BytesIO

try:
    from PIL import Image
except ImportError:
    Image = None


class Images(BaseImages):
    def __init__(self, client):
        self._client = client

    def create(
        self,
        *,
        model: str,
        prompt: str,
        n: int = 1,
        size: str = "1024x1024",
        response_format: str = "b64_json",
        user: Optional[str] = None,
        style: str = None,
        aspect_ratio: str = None,
        timeout: int = 60,
        image_format: str = "png",
        **kwargs,
    ) -> ImageResponse:
        if not prompt:
            raise ValueError(
                "Describe the image you want to create (use the 'prompt' property)."
            )
        # Only one image is supported by MonoChat API, but keep n for compatibility
        body = {
            "prompt": prompt,
            "model": model
        }
        session = self._client.session
        headers = self._client.headers
        images = []
        urls = []

        def upload_file_with_retry(img_bytes, image_format, max_retries=3):
            ext = "jpg" if image_format.lower() == "jpeg" else "png"
            for attempt in range(max_retries):
                tmp_path = None
                try:
                    with tempfile.NamedTemporaryFile(suffix=f".{ext}", delete=False) as tmp:
                        tmp.write(img_bytes)
                        tmp.flush()
                        tmp_path = tmp.name
                    with open(tmp_path, "rb") as f:
                        files = {"fileToUpload": (f"image.{ext}", f, f"image/{ext}")}
                        data = {"reqtype": "fileupload", "json": "true"}
                        headers = {"User-Agent": LitAgent().random()}
                        if attempt > 0:
                            headers["Connection"] = "close"
                        resp = requests.post(
                            "https://catbox.moe/user/api.php",
                            files=files,
                            data=data,
                            headers=headers,
                            timeout=timeout,
                        )
                    if resp.status_code == 200 and resp.text.strip():
                        text = resp.text.strip()
                        if text.startswith("http"):
                            return text
                        try:
                            result = resp.json()
                            if "url" in result:
                                return result["url"]
                        except json.JSONDecodeError:
                            if "http" in text:
                                return text
                except Exception:
                    if attempt < max_retries - 1:
                        time.sleep(1 * (attempt + 1))
                finally:
                    if tmp_path and os.path.isfile(tmp_path):
                        try:
                            os.remove(tmp_path)
                        except Exception:
                            pass
            return None

        def upload_file_alternative(img_bytes, image_format):
            try:
                ext = "jpg" if image_format.lower() == "jpeg" else "png"
                with tempfile.NamedTemporaryFile(suffix=f".{ext}", delete=False) as tmp:
                    tmp.write(img_bytes)
                    tmp.flush()
                    tmp_path = tmp.name
                try:
                    if not os.path.isfile(tmp_path):
                        return None
                    with open(tmp_path, "rb") as img_file:
                        files = {"file": img_file}
                        response = requests.post("https://0x0.st", files=files)
                        response.raise_for_status()
                        image_url = response.text.strip()
                    if not image_url.startswith("http"):
                        return None
                    return image_url
                except Exception:
                    return None
                finally:
                    try:
                        os.remove(tmp_path)
                    except Exception:
                        pass
            except Exception:
                return None

        try:
            resp = session.post(
                f"{self._client.api_endpoint}/image",
                json=body,
                headers=headers,
                timeout=timeout,
            )
            resp.raise_for_status()
            data = resp.json()
            if not data.get("image"):
                raise RuntimeError("Failed to process image. No image data found.")
            # Always decode the base64 image
            image_bytes = base64.b64decode(data.get("image"))
            if response_format == "b64_json":
                result_data = [ImageData(b64_json=data.get("image"))]
            elif response_format == "url":
                if Image is None:
                    raise ImportError("Pillow (PIL) is required for image format conversion.")
                # Convert to png or jpeg in memory
                with BytesIO(image_bytes) as input_io:
                    with Image.open(input_io) as im:
                        out_io = BytesIO()
                        if image_format.lower() == "jpeg":
                            im = im.convert("RGB")
                            im.save(out_io, format="JPEG")
                        else:
                            im.save(out_io, format="PNG")
                        img_bytes = out_io.getvalue()
                # Try primary upload method with retries
                uploaded_url = upload_file_with_retry(img_bytes, image_format)
                # If primary method fails, try alternative
                if not uploaded_url:
                    uploaded_url = upload_file_alternative(img_bytes, image_format)
                if uploaded_url:
                    result_data = [ImageData(url=uploaded_url)]
                else:
                    raise RuntimeError("Failed to upload image to catbox.moe using all available methods")
            else:
                raise ValueError("response_format must be 'url' or 'b64_json'")
            from time import time as _time
            return ImageResponse(created=int(_time()), data=result_data)
        except Exception as e:
            raise RuntimeError(f"An error occurred: {str(e)}")


class MonoChatAI(TTICompatibleProvider):
    AVAILABLE_MODELS = ["nextlm-image-1", "gpt-image-1", "dall-e-3", "dall-e-2"]

    def __init__(self):
        self.api_endpoint = "https://www.chatwithmono.xyz/api"
        self.session = requests.Session()
        self._setup_session_with_retries()
        self.user_agent = LitAgent().random()
        self.headers = {
            "accept": "*/*",
            "content-type": "application/json",
            "origin": "https://www.chatwithmono.xyz",
            "referer": "https://www.chatwithmono.xyz/",
            "user-agent": self.user_agent,
        }
        self.session.headers.update(self.headers)
        self.images = Images(self)

    def _setup_session_with_retries(self):
        retry_strategy = Retry(
            total=3,
            status_forcelist=[429, 500, 502, 503, 504],
            backoff_factor=1,
            allowed_methods=["HEAD", "GET", "OPTIONS", "POST"],
        )
        adapter = HTTPAdapter(max_retries=retry_strategy)
        self.session.mount("http://", adapter)
        self.session.mount("https://", adapter)

    @property
    def models(self):
        class _ModelList:
            def list(inner_self):
                return type(self).AVAILABLE_MODELS
        return _ModelList()


if __name__ == "__main__":
    from rich import print
    client = MonoChatAI()
    response = client.images.create(
        model="dall-e-3",
        prompt="A red car on a sunny day",
        response_format="url",
        timeout=60000,
    )
    print(response)
```
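With response_format="url" the MonoChat provider re-uploads the decoded image to catbox.moe (falling back to 0x0.st) just to produce a link; requesting b64_json, the default, returns the base64 payload directly and skips that third-party hop. A hedged sketch of that path, using only names visible in the code above (the import path is assumed from the file location, the output filename is illustrative):

```python
import base64

from webscout.Provider.TTI.monochat import MonoChatAI

client = MonoChatAI()
response = client.images.create(
    model="gpt-image-1",
    prompt="A red car on a sunny day",
    response_format="b64_json",  # default; avoids the catbox.moe/0x0.st upload step
)

with open("car.png", "wb") as fh:  # filename is illustrative
    fh.write(base64.b64decode(response.data[0].b64_json))
```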