webscout-8.3.1-py3-none-any.whl → webscout-8.3.3-py3-none-any.whl

This diff shows the changes between package versions as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of webscout might be problematic.

Files changed (114)
  1. webscout/AIutel.py +180 -78
  2. webscout/Bing_search.py +417 -0
  3. webscout/Extra/gguf.py +706 -177
  4. webscout/Provider/AISEARCH/__init__.py +1 -0
  5. webscout/Provider/AISEARCH/genspark_search.py +7 -7
  6. webscout/Provider/AISEARCH/stellar_search.py +132 -0
  7. webscout/Provider/ExaChat.py +84 -58
  8. webscout/Provider/GeminiProxy.py +140 -0
  9. webscout/Provider/HeckAI.py +85 -80
  10. webscout/Provider/Jadve.py +56 -50
  11. webscout/Provider/MCPCore.py +78 -75
  12. webscout/Provider/MiniMax.py +207 -0
  13. webscout/Provider/Nemotron.py +41 -13
  14. webscout/Provider/Netwrck.py +34 -51
  15. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -4
  16. webscout/Provider/OPENAI/GeminiProxy.py +328 -0
  17. webscout/Provider/OPENAI/MiniMax.py +298 -0
  18. webscout/Provider/OPENAI/README.md +32 -29
  19. webscout/Provider/OPENAI/README_AUTOPROXY.md +238 -0
  20. webscout/Provider/OPENAI/TogetherAI.py +4 -17
  21. webscout/Provider/OPENAI/__init__.py +17 -1
  22. webscout/Provider/OPENAI/autoproxy.py +1067 -39
  23. webscout/Provider/OPENAI/base.py +17 -76
  24. webscout/Provider/OPENAI/deepinfra.py +42 -108
  25. webscout/Provider/OPENAI/e2b.py +0 -1
  26. webscout/Provider/OPENAI/flowith.py +179 -166
  27. webscout/Provider/OPENAI/friendli.py +233 -0
  28. webscout/Provider/OPENAI/mcpcore.py +109 -70
  29. webscout/Provider/OPENAI/monochat.py +329 -0
  30. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  31. webscout/Provider/OPENAI/scirachat.py +59 -51
  32. webscout/Provider/OPENAI/toolbaz.py +3 -9
  33. webscout/Provider/OPENAI/typegpt.py +1 -1
  34. webscout/Provider/OPENAI/utils.py +19 -42
  35. webscout/Provider/OPENAI/x0gpt.py +14 -2
  36. webscout/Provider/OPENAI/xenai.py +514 -0
  37. webscout/Provider/OPENAI/yep.py +8 -2
  38. webscout/Provider/OpenGPT.py +54 -32
  39. webscout/Provider/PI.py +58 -84
  40. webscout/Provider/StandardInput.py +32 -13
  41. webscout/Provider/TTI/README.md +9 -9
  42. webscout/Provider/TTI/__init__.py +3 -1
  43. webscout/Provider/TTI/aiarta.py +92 -78
  44. webscout/Provider/TTI/bing.py +231 -0
  45. webscout/Provider/TTI/infip.py +212 -0
  46. webscout/Provider/TTI/monochat.py +220 -0
  47. webscout/Provider/TTS/speechma.py +45 -39
  48. webscout/Provider/TeachAnything.py +11 -3
  49. webscout/Provider/TextPollinationsAI.py +78 -70
  50. webscout/Provider/TogetherAI.py +350 -0
  51. webscout/Provider/Venice.py +37 -46
  52. webscout/Provider/VercelAI.py +27 -24
  53. webscout/Provider/WiseCat.py +35 -35
  54. webscout/Provider/WrDoChat.py +22 -26
  55. webscout/Provider/WritingMate.py +26 -22
  56. webscout/Provider/XenAI.py +324 -0
  57. webscout/Provider/__init__.py +10 -5
  58. webscout/Provider/deepseek_assistant.py +378 -0
  59. webscout/Provider/granite.py +48 -57
  60. webscout/Provider/koala.py +51 -39
  61. webscout/Provider/learnfastai.py +49 -64
  62. webscout/Provider/llmchat.py +79 -93
  63. webscout/Provider/llmchatco.py +63 -78
  64. webscout/Provider/multichat.py +51 -40
  65. webscout/Provider/oivscode.py +1 -1
  66. webscout/Provider/scira_chat.py +159 -96
  67. webscout/Provider/scnet.py +13 -13
  68. webscout/Provider/searchchat.py +13 -13
  69. webscout/Provider/sonus.py +12 -11
  70. webscout/Provider/toolbaz.py +25 -8
  71. webscout/Provider/turboseek.py +41 -42
  72. webscout/Provider/typefully.py +27 -12
  73. webscout/Provider/typegpt.py +41 -46
  74. webscout/Provider/uncovr.py +55 -90
  75. webscout/Provider/x0gpt.py +33 -17
  76. webscout/Provider/yep.py +79 -96
  77. webscout/auth/__init__.py +55 -0
  78. webscout/auth/api_key_manager.py +189 -0
  79. webscout/auth/auth_system.py +100 -0
  80. webscout/auth/config.py +76 -0
  81. webscout/auth/database.py +400 -0
  82. webscout/auth/exceptions.py +67 -0
  83. webscout/auth/middleware.py +248 -0
  84. webscout/auth/models.py +130 -0
  85. webscout/auth/providers.py +279 -0
  86. webscout/auth/rate_limiter.py +254 -0
  87. webscout/auth/request_models.py +127 -0
  88. webscout/auth/request_processing.py +226 -0
  89. webscout/auth/routes.py +550 -0
  90. webscout/auth/schemas.py +103 -0
  91. webscout/auth/server.py +367 -0
  92. webscout/client.py +121 -70
  93. webscout/litagent/Readme.md +68 -55
  94. webscout/litagent/agent.py +99 -9
  95. webscout/scout/core/scout.py +104 -26
  96. webscout/scout/element.py +139 -18
  97. webscout/swiftcli/core/cli.py +14 -3
  98. webscout/swiftcli/decorators/output.py +59 -9
  99. webscout/update_checker.py +31 -49
  100. webscout/version.py +1 -1
  101. webscout/webscout_search.py +4 -12
  102. webscout/webscout_search_async.py +3 -10
  103. webscout/yep_search.py +2 -11
  104. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/METADATA +141 -99
  105. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/RECORD +109 -83
  106. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/entry_points.txt +1 -1
  107. webscout/Provider/HF_space/__init__.py +0 -0
  108. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  109. webscout/Provider/OPENAI/api.py +0 -1320
  110. webscout/Provider/TTI/fastflux.py +0 -233
  111. webscout/Provider/Writecream.py +0 -246
  112. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/WHEEL +0 -0
  113. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/licenses/LICENSE.md +0 -0
  114. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/top_level.txt +0 -0
webscout/Provider/PI.py CHANGED
@@ -172,9 +172,10 @@ class PiAI(Provider):
         voice: bool = None,
         voice_name: str = None,
         output_file: str = None
-    ) -> dict:
+    ) -> Union[dict, str, Any]:
         """
         Interact with Pi.ai by sending a prompt and receiving a response.
+        Now supports raw streaming and non-streaming output, matching the pattern in other providers.
 
         Args:
             prompt (str): The prompt to send
@@ -209,103 +210,82 @@ class PiAI(Provider):
         }
 
         def process_stream():
-            try: # Add outer try block for error handling
-                # Try primary URL first
+            try:
                 current_url = self.url
                 response = self.session.post(
                     current_url,
-                    # headers are set on the session
-                    # cookies are handled by the session
                     json=data,
                     stream=True,
                     timeout=self.timeout,
-                    # proxies are set on the session
-                    impersonate="chrome110" # Use a common impersonation profile
+                    impersonate="chrome110"
                 )
-
-                # If primary URL fails, try fallback URL
                 if not response.ok and current_url == self.primary_url:
                     current_url = self.fallback_url
                     response = self.session.post(
                         current_url,
-                        # headers are set on the session
-                        # cookies are handled by the session
                         json=data,
                         stream=True,
                         timeout=self.timeout,
-                        # proxies are set on the session
-                        impersonate="chrome110" # Use a common impersonation profile
+                        impersonate="chrome110"
                     )
-
-                response.raise_for_status() # Check for HTTP errors after potential fallback
-
-                # --- Process response content ---
-                # Note: curl_cffi's response.content might behave differently for streams.
-                # It's often better to iterate directly.
-                # output_str = response.content.decode('utf-8') # Avoid reading full content at once for streams
+                response.raise_for_status()
 
                 sids = []
                 streaming_text = ""
-                full_raw_data_for_sids = "" # Accumulate raw data to find SIDs later
-
-                # Iterate over bytes and decode manually
+                full_raw_data_for_sids = ""
+
+                processed_stream = sanitize_stream(
+                    data=response.iter_lines(),
+                    intro_value="data: ",
+                    to_json=True,
+                    content_extractor=self._pi_extractor,
+                    raw=raw
+                )
+                for content in processed_stream:
+                    if raw:
+                        yield content
+                    else:
+                        if content and isinstance(content, str):
+                            streaming_text += content
+                            yield {"text": streaming_text}
+                # SID extraction for voice
                 for line_bytes in response.iter_lines():
                     if line_bytes:
                         line = line_bytes.decode('utf-8')
-                        full_raw_data_for_sids += line + "\n" # Accumulate for SID extraction
-
-                        if line.startswith("data: "):
-                            json_line_str = line[6:] # Get the JSON part as string
-                            try:
-                                # Process this single JSON line string with sanitize_stream
-                                processed_gen = sanitize_stream(
-                                    data=json_line_str,
-                                    to_json=True,
-                                    content_extractor=self._pi_extractor
-                                )
-                                chunk_text = next(processed_gen, None) # Get the single extracted text item
-                                if chunk_text and isinstance(chunk_text, str):
-                                    streaming_text += chunk_text
-                                    yield {"text": streaming_text} # Always yield dict with aggregated text
-                            except (StopIteration, json.JSONDecodeError, UnicodeDecodeError):
-                                continue # Skip if sanitize_stream fails or yields nothing
-                # Extract SIDs after processing the stream
+                        full_raw_data_for_sids += line + "\n"
                 sids = re.findall(r'"sid":"(.*?)"', full_raw_data_for_sids)
                 second_sid = sids[1] if len(sids) >= 2 else None
-
                 if voice and voice_name and second_sid:
                     threading.Thread(
                         target=self.download_audio_threaded,
                         args=(voice_name, second_sid, output_file)
                     ).start()
-
-                # Update history and last response after stream finishes
-                self.last_response = dict(text=streaming_text)
-                self.conversation.update_chat_history(
-                    prompt, streaming_text
-                )
-
-            except CurlError as e: # Catch CurlError
+                if not raw:
+                    self.last_response = dict(text=streaming_text)
+                    self.conversation.update_chat_history(prompt, streaming_text)
+            except CurlError as e:
                 raise exceptions.FailedToGenerateResponseError(f"API request failed (CurlError): {e}") from e
-            except Exception as e: # Catch other potential exceptions (like HTTPError)
+            except Exception as e:
                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                 raise exceptions.FailedToGenerateResponseError(f"API request failed ({type(e).__name__}): {e} - {err_text}") from e
 
-
         if stream:
             return process_stream()
         else:
-            # For non-stream, collect all responses and return the final one
-            final_text = ""
-            # process_stream always yields dicts now
-            for res in process_stream():
-                if isinstance(res, dict) and "text" in res:
-                    final_text = res["text"] # Keep updating with the latest aggregated text
-
-            # last_response and history are updated within process_stream
-            # Return the final aggregated response dict or raw text
-            return final_text if raw else self.last_response
-
+            full_response = ""
+            for chunk in process_stream():
+                if raw:
+                    if isinstance(chunk, str):
+                        full_response += chunk
+                else:
+                    if isinstance(chunk, dict) and "text" in chunk:
+                        full_response = chunk["text"]
+            if not raw:
+                self.last_response = {"text": full_response}
+                self.conversation.update_chat_history(prompt, full_response)
+                return self.last_response
+            else:
+                return full_response
 
     def chat(
         self,
@@ -315,8 +295,9 @@ class PiAI(Provider):
         conversationally: bool = False,
         voice: bool = None,
         voice_name: str = None,
-        output_file: str = None
-    ) -> str:
+        output_file: str = None,
+        raw: bool = False, # Added raw parameter
+    ) -> Union[str, Any]:
         """
         Generates a response based on the provided prompt.
 
@@ -339,35 +320,37 @@ class PiAI(Provider):
 
         if stream:
             def stream_generator():
-                # ask() yields dicts or raw JSON objects when streaming
                 gen = self.ask(
                     prompt,
                     stream=True,
-                    raw=False, # Ensure ask yields dicts for get_message
+                    raw=raw,
                     optimizer=optimizer,
                     conversationally=conversationally,
                     voice=voice,
                     voice_name=voice_name,
                     output_file=output_file
                 )
-                for response_dict in gen:
-                    # get_message expects dict
-                    yield self.get_message(response_dict)
+                for response in gen:
+                    if raw:
+                        yield response
+                    else:
+                        yield self.get_message(response)
             return stream_generator()
         else:
-            # ask() returns dict or raw text when not streaming
             response_data = self.ask(
                 prompt,
                 stream=False,
-                raw=False, # Ensure ask returns dict for get_message
+                raw=raw,
                 optimizer=optimizer,
                 conversationally=conversationally,
                 voice=voice,
                 voice_name=voice_name,
                 output_file=output_file
             )
-            # get_message expects dict
-            return self.get_message(response_data)
+            if raw:
+                return response_data
+            else:
+                return self.get_message(response_data)
 
     def get_message(self, response: dict) -> str:
         """Retrieves message only from response"""
@@ -411,19 +394,10 @@ if __name__ == '__main__':
     try: # Add try-except block for testing
         ai = PiAI(timeout=60)
         print("[bold blue]Testing Chat (Stream):[/bold blue]")
-        response = ai.chat(input(">>> "), stream=True)
+        response = ai.chat("hi", stream=True, raw=False)
        full_response = ""
         for chunk in response:
             print(chunk, end="", flush=True)
-            full_response += chunk
-        print("\n[bold green]Stream Test Complete.[/bold green]")
-
-        # Optional: Test non-stream
-        # print("\n[bold blue]Testing Chat (Non-Stream):[/bold blue]")
-        # response_non_stream = ai.chat("Hello again", stream=False)
-        # print(response_non_stream)
-        # print("[bold green]Non-Stream Test Complete.[/bold green]")
-
     except exceptions.FailedToGenerateResponseError as e:
         print(f"\n[bold red]API Error:[/bold red] {e}")
     except Exception as e:
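
Taken together, the changes above let callers of PiAI opt into raw pass-through chunks instead of aggregated dicts. A minimal usage sketch, assuming the import path implied by the file name and the call patterns shown in the diff's own __main__ block; the exact shape of raw chunks depends on sanitize_stream and is not fully specified here:

    from webscout.Provider.PI import PiAI  # path inferred from the file name above

    ai = PiAI(timeout=60)

    # raw=False (default): chat() routes each yielded dict through get_message(),
    # producing printable text, as in the module's own __main__ test.
    for chunk in ai.chat("hi", stream=True, raw=False):
        print(chunk, end="", flush=True)

    # raw=True: ask()/chat() pass sanitize_stream output straight through,
    # leaving parsing to the caller.
    for chunk in ai.chat("hi", stream=True, raw=True):
        print(chunk)
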
webscout/Provider/StandardInput.py CHANGED
@@ -1,7 +1,7 @@
 from curl_cffi.requests import Session
 import uuid
 import re
-from typing import Any, Dict, Optional, Union
+from typing import Any, Dict, Generator, Optional, Union
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
@@ -166,7 +166,8 @@ class StandardInputAI(Provider):
         prompt: str,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> Dict[str, Any]:
+        raw: bool = False, # Added raw parameter
+    ) -> Union[Dict[str, Any], Generator[str, None, None]]:
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
@@ -233,9 +234,12 @@ class StandardInputAI(Provider):
             for content_chunk in processed_stream:
                 if content_chunk and isinstance(content_chunk, str):
                     full_response += content_chunk
-
+                    if raw:
+                        yield content_chunk
             self.last_response = {"text": full_response}
             self.conversation.update_chat_history(prompt, full_response)
+            if raw:
+                return full_response
             return {"text": full_response}
         except Exception as e:
             raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
@@ -245,17 +249,32 @@ class StandardInputAI(Provider):
         prompt: str,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str:
-        return self.get_message(
-            self.ask(
-                prompt, optimizer=optimizer, conversationally=conversationally
+        raw: bool = False, # Added raw parameter
+    ) -> Union[str, Generator[str, None, None]]:
+        def for_stream():
+            gen = self.ask(
+                prompt, optimizer=optimizer, conversationally=conversationally, raw=raw
             )
-        )
-
-    def get_message(self, response: dict) -> str:
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        # Extractor handles formatting
-        return response.get("text", "").replace('\\n', '\n').replace('\\n\\n', '\n\n')
+            if hasattr(gen, '__iter__') and not isinstance(gen, dict):
+                for chunk in gen:
+                    if raw:
+                        yield chunk
+                    else:
+                        yield self.get_message({"text": chunk})
+            else:
+                if raw:
+                    yield gen if isinstance(gen, str) else self.get_message(gen)
+                else:
+                    yield self.get_message(gen)
+        def for_non_stream():
+            result = self.ask(
+                prompt, optimizer=optimizer, conversationally=conversationally, raw=raw
+            )
+            if raw:
+                return result if isinstance(result, str) else self.get_message(result)
+            else:
+                return self.get_message(result)
+        return for_stream() if raw else for_non_stream()
 
 if __name__ == "__main__":
     print("-" * 100)
webscout/Provider/TTI/README.md CHANGED
@@ -14,14 +14,14 @@ These providers allow you to easily generate AI‑created art from text prompts
 
 ## 📦 Supported Providers
 
-| Provider        | Available Models (examples)                |
-|-----------------|--------------------------------------------|
-| `AIArta`        | `flux`, `medieval`, `dreamshaper_xl`, ...  |
-| `FastFluxAI`    | `flux_1_schnell`                           |
-| `MagicStudioAI` | `magicstudio`                              |
-| `PixelMuse`     | `flux-schnell`, `imagen-3`, `recraft-v3`   |
-| `PiclumenAI`    | `piclumen-v1`                              |
-| `PollinationsAI`| `flux`, `turbo`, `gptimage`                |
+| Provider         | Available Models (examples)               |
+| ---------------- | ----------------------------------------- |
+| `AIArta`         | `flux`, `medieval`, `dreamshaper_xl`, ... |
+| `InfipAI`        | `img3`, `img4`, `uncen`                   |
+| `MagicStudioAI`  | `magicstudio`                             |
+| `PixelMuse`      | `flux-schnell`, `imagen-3`, `recraft-v3`  |
+| `PiclumenAI`     | `piclumen-v1`                             |
+| `PollinationsAI` | `flux`, `turbo`, `gptimage`               |
 
 > **Note**: Some providers require the `Pillow` package for image processing.
 
@@ -71,7 +71,7 @@ response = client.images.create(
 ## 🔧 Provider Specifics
 
 - **AIArta** – Uses Firebase authentication tokens and supports many tattoo‑style models.
-- **FastFluxAI** – Simple API for quick image generation.
+- **InfipAI** – Offers various models for different image styles.
 - **MagicStudioAI** – Generates images through MagicStudio's public endpoint.
 - **PixelMuse** – Supports several models and converts images from WebP.
 - **PiclumenAI** – Returns JPEG images directly from the API.
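
For the newly listed InfipAI, usage presumably mirrors the client.images.create(...) call visible in the hunk context above. A hedged sketch: the class name is taken from the updated table and the parameters from the shared TTI interface, neither of which this diff shows directly:

    from webscout.Provider.TTI import InfipAI  # name taken from the updated table

    client = InfipAI()
    response = client.images.create(
        model="img3",  # one of the models listed for InfipAI
        prompt="a watercolor painting of a fox",
        response_format="url",
        n=1,
    )
    print(response)
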
webscout/Provider/TTI/__init__.py CHANGED
@@ -1,9 +1,11 @@
 from .pollinations import *
 from .piclumen import *
 from .magicstudio import *
-from .fastflux import *
 from .pixelmuse import *
 from .aiarta import *
 from .gpt1image import *
 from .imagen import *
 from .together import *
+from .bing import *
+from .infip import *
+from .monochat import *
webscout/Provider/TTI/aiarta.py CHANGED
@@ -24,6 +24,8 @@ import tempfile
 from webscout.litagent import LitAgent
 import time
 import json
+import random
+from pathlib import Path
 
 try:
     from PIL import Image
@@ -31,6 +33,10 @@ except ImportError:
     Image = None
 
 
+class ModelNotFoundError(Exception):
+    pass
+
+
 class Images(BaseImages):
     def __init__(self, client: "AIArta"):
         self._client = client
@@ -140,22 +146,23 @@ class Images(BaseImages):
         gen_headers = {
             "Authorization": auth_data.get("idToken"),
         }
-        # Remove content-type header for form data
         if "content-type" in self._client.session.headers:
             del self._client.session.headers["content-type"]
-        # get_model now returns the proper style name from model_aliases
-        style_value = self._client.get_model(model)
+        # Use the model name directly, not as 'style'
         image_payload = {
             "prompt": str(prompt),
             "negative_prompt": str(
                 kwargs.get("negative_prompt", "blurry, deformed hands, ugly")
             ),
-            "style": str(style_value),
-            "images_num": str(1), # Generate one image at a time in the loop
+            "style": str(model), # Use 'style' key for the model name
+            "images_num": str(1),
             "cfg_scale": str(kwargs.get("guidance_scale", 7)),
             "steps": str(kwargs.get("num_inference_steps", 30)),
             "aspect_ratio": str(aspect_ratio),
         }
+        # Remove 'model' from payload if present
+        if "model" in image_payload:
+            del image_payload["model"]
         # Step 2: Generate Image (send as form data, not JSON)
         image_response = self._client.session.post(
             self._client.image_generation_url,
@@ -241,68 +248,67 @@ class Images(BaseImages):
 
 
 class AIArta(TTICompatibleProvider):
-    # Model aliases mapping from lowercase keys to proper API style names
-    model_aliases = {
-        "flux": "Flux",
-        "medieval": "Medieval",
-        "vincent_van_gogh": "Vincent Van Gogh",
-        "f_dev": "F Dev",
-        "low_poly": "Low Poly",
-        "dreamshaper_xl": "Dreamshaper-xl",
-        "anima_pencil_xl": "Anima-pencil-xl",
-        "biomech": "Biomech",
-        "trash_polka": "Trash Polka",
-        "no_style": "No Style",
-        "cheyenne_xl": "Cheyenne-xl",
-        "chicano": "Chicano",
-        "embroidery_tattoo": "Embroidery tattoo",
-        "red_and_black": "Red and Black",
-        "fantasy_art": "Fantasy Art",
-        "watercolor": "Watercolor",
-        "dotwork": "Dotwork",
-        "old_school_colored": "Old school colored",
-        "realistic_tattoo": "Realistic tattoo",
-        "japanese_2": "Japanese_2",
-        "realistic_stock_xl": "Realistic-stock-xl",
-        "f_pro": "F Pro",
-        "revanimated": "RevAnimated",
-        "katayama_mix_xl": "Katayama-mix-xl",
-        "sdxl_l": "SDXL L",
-        "cor_epica_xl": "Cor-epica-xl",
-        "anime_tattoo": "Anime tattoo",
-        "new_school": "New School",
-        "death_metal": "Death metal",
-        "old_school": "Old School",
-        "juggernaut_xl": "Juggernaut-xl",
-        "photographic": "Photographic",
-        "sdxl_1_0": "SDXL 1.0",
-        "graffiti": "Graffiti",
-        "mini_tattoo": "Mini tattoo",
-        "surrealism": "Surrealism",
-        "neo_traditional": "Neo-traditional",
-        "on_limbs_black": "On limbs black",
-        "yamers_realistic_xl": "Yamers-realistic-xl",
-        "pony_xl": "Pony-xl",
-        "playground_xl": "Playground-xl",
-        "anything_xl": "Anything-xl",
-        "flame_design": "Flame design",
-        "kawaii": "Kawaii",
-        "cinematic_art": "Cinematic Art",
-        "professional": "Professional",
-        "black_ink": "Black Ink",
-    }
-
-    AVAILABLE_MODELS = list(model_aliases.keys())
-    default_model = "Flux"
-    default_image_model = default_model
+    url = "https://ai-arta.com"
+    auth_url = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/signupNewUser?key=AIzaSyB3-71wG0fIt0shj0ee4fvx1shcjJHGrrQ"
+    token_refresh_url = "https://securetoken.googleapis.com/v1/token?key=AIzaSyB3-71wG0fIt0shj0ee4fvx1shcjJHGrrQ"
+    image_generation_url = "https://img-gen-prod.ai-arta.com/api/v1/text2image"
+    status_check_url = "https://img-gen-prod.ai-arta.com/api/v1/text2image/{record_id}/status"
+    AVAILABLE_MODELS = [
+        "Anything-xl",
+        "High GPT4o",
+        "On limbs black",
+        "F Dev",
+        "SDXL 1.0",
+        "Old School",
+        "Vincent Van Gogh",
+        "Cor-epica-xl",
+        "Professional",
+        "Cheyenne-xl",
+        "Chicano",
+        "SDXL L",
+        "Black Ink",
+        "Juggernaut-xl",
+        "Cinematic Art",
+        "Dreamshaper-xl",
+        "Fantasy Art",
+        "Neo-traditional",
+        "Realistic-stock-xl",
+        "Flame design",
+        "Japanese_2",
+        "Medieval",
+        "Surrealism",
+        "Dotwork",
+        "Graffiti",
+        "RevAnimated",
+        "On limbs color",
+        "Old school colored",
+        "GPT4o Ghibli",
+        "Low Poly",
+        "GPT4o",
+        "No Style",
+        "Anime",
+        "tattoo",
+        "Embroidery tattoo",
+        "Mini tattoo",
+        "Realistic tattoo",
+        "Playground-xl",
+        "Watercolor",
+        "F Pro",
+        "Kawaii",
+        "Photographic",
+        "Katayama-mix-xl",
+        "Death metal",
+        "New School",
+        "Pony-xl",
+        "Anima-pencil-xl",
+        "Flux",
+        "Biomech",
+        "Yamers-realistic-xl",
+        "Trash Polka",
+        "Red and Black",
+    ]
 
     def __init__(self):
-        self.image_generation_url = "https://img-gen-prod.ai-arta.com/api/v1/text2image"
-        self.status_check_url = (
-            "https://img-gen-prod.ai-arta.com/api/v1/text2image/{record_id}/status"
-        )
-        self.auth_url = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/signupNewUser?key=AIzaSyB3-71wG0fIt0shj0ee4fvx1shcjJHGrrQ"
-        self.token_refresh_url = "https://securetoken.googleapis.com/v1/token?key=AIzaSyB3-71wG0fIt0shj0ee4fvx1shcjJHGrrQ"
         self.session = requests.Session()
         self.user_agent = LitAgent().random()
         self.headers = {
@@ -316,11 +322,11 @@ class AIArta(TTICompatibleProvider):
         self.images = Images(self)
 
     def get_auth_file(self) -> str:
-        path = os.path.join(os.path.expanduser("~"), ".ai_arta_cookies")
-        if not os.path.exists(path):
-            os.makedirs(path)
+        import tempfile
+        # Use a temp file in the system's temp directory, unique per class
         filename = f"auth_{self.__class__.__name__}.json"
-        return os.path.join(path, filename)
+        temp_dir = tempfile.gettempdir()
+        return os.path.join(temp_dir, filename)
 
     def create_token(self, path: str) -> Dict[str, Any]:
         auth_payload = {"clientType": "CLIENT_TYPE_ANDROID"}
@@ -369,17 +375,25 @@ class AIArta(TTICompatibleProvider):
                 return auth_data
         return self.create_token(path)
 
-    def get_model(self, model_name: str) -> str:
-        # Convert to lowercase for lookup
-        model_key = model_name.lower()
-        # Return the proper style name from model_aliases, or the original if not found
-        return self.model_aliases.get(model_key, model_name)
+    def get_model(self, model: str) -> str:
+        if not model:
+            return self.default_model
+        if model in self.models:
+            return model
+        raise ModelNotFoundError(f"Model {model} not found")
 
     @property
     def models(self):
         class _ModelList:
             def list(inner_self):
                 return type(self).AVAILABLE_MODELS
+        return _ModelList()
+
+    @property
+    def models_list(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).models
 
         return _ModelList()
 
@@ -390,10 +404,10 @@ if __name__ == "__main__":
 
     client = AIArta()
     response = client.images.create(
-        model="flux",
-        prompt="a white siamese cat",
+        model="GPT4o",
+        prompt="Chitt Robot saying 'Hello World'",
         response_format="url",
-        n=2,
-        timeout=30,
+        n=1,
+        timeout=3000,
     )
     print(response)
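
Since the lowercase alias map is gone, model names must now match AVAILABLE_MODELS exactly (e.g. "Flux", not "flux"). A minimal sketch of validating a name via the models.list() accessor shown above before calling create(); the import path is inferred from the file name:

    from webscout.Provider.TTI.aiarta import AIArta  # path inferred from the file name

    client = AIArta()
    available = client.models.list()  # returns AVAILABLE_MODELS per the property above

    model = "Flux"  # exact casing required now that the alias map was removed
    if model not in available:
        raise ValueError(f"Unknown model {model!r}; choose one of {available[:5]}...")

    response = client.images.create(
        model=model,
        prompt="a white siamese cat",
        response_format="url",
        n=1,
    )
    print(response)
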