webscout 6.0__py3-none-any.whl → 6.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; see the registry's advisory page for more details.

Files changed (63)
  1. webscout/AIauto.py +77 -259
  2. webscout/Agents/Onlinesearcher.py +22 -10
  3. webscout/Agents/functioncall.py +2 -2
  4. webscout/Bard.py +21 -21
  5. webscout/Extra/autollama.py +37 -20
  6. webscout/Local/__init__.py +6 -7
  7. webscout/Local/formats.py +406 -194
  8. webscout/Local/model.py +1074 -477
  9. webscout/Local/samplers.py +108 -144
  10. webscout/Local/thread.py +251 -410
  11. webscout/Local/ui.py +401 -0
  12. webscout/Local/utils.py +338 -136
  13. webscout/Provider/Amigo.py +51 -38
  14. webscout/Provider/Deepseek.py +7 -6
  15. webscout/Provider/EDITEE.py +2 -2
  16. webscout/Provider/GPTWeb.py +1 -1
  17. webscout/Provider/Llama3.py +1 -1
  18. webscout/Provider/NinjaChat.py +200 -0
  19. webscout/Provider/OLLAMA.py +1 -1
  20. webscout/Provider/Perplexity.py +1 -1
  21. webscout/Provider/Reka.py +12 -5
  22. webscout/Provider/TTI/AIuncensored.py +103 -0
  23. webscout/Provider/TTI/Nexra.py +3 -3
  24. webscout/Provider/TTI/__init__.py +4 -2
  25. webscout/Provider/TTI/aiforce.py +2 -2
  26. webscout/Provider/TTI/imgninza.py +136 -0
  27. webscout/Provider/TTI/talkai.py +116 -0
  28. webscout/Provider/TeachAnything.py +0 -3
  29. webscout/Provider/Youchat.py +1 -1
  30. webscout/Provider/__init__.py +16 -12
  31. webscout/Provider/{ChatHub.py → aimathgpt.py} +72 -88
  32. webscout/Provider/cerebras.py +143 -123
  33. webscout/Provider/cleeai.py +1 -1
  34. webscout/Provider/felo_search.py +1 -1
  35. webscout/Provider/gaurish.py +207 -0
  36. webscout/Provider/geminiprorealtime.py +160 -0
  37. webscout/Provider/genspark.py +1 -1
  38. webscout/Provider/julius.py +8 -3
  39. webscout/Provider/learnfastai.py +1 -1
  40. webscout/Provider/{aigames.py → llmchat.py} +74 -84
  41. webscout/Provider/promptrefine.py +3 -1
  42. webscout/Provider/talkai.py +196 -0
  43. webscout/Provider/turboseek.py +3 -8
  44. webscout/Provider/tutorai.py +1 -1
  45. webscout/__init__.py +2 -43
  46. webscout/exceptions.py +5 -1
  47. webscout/tempid.py +4 -73
  48. webscout/utils.py +3 -0
  49. webscout/version.py +1 -1
  50. webscout/webai.py +1 -1
  51. webscout/webscout_search.py +154 -123
  52. {webscout-6.0.dist-info → webscout-6.2.dist-info}/METADATA +164 -245
  53. {webscout-6.0.dist-info → webscout-6.2.dist-info}/RECORD +57 -55
  54. webscout/Local/rawdog.py +0 -946
  55. webscout/Provider/BasedGPT.py +0 -214
  56. webscout/Provider/TTI/amigo.py +0 -148
  57. webscout/Provider/bixin.py +0 -264
  58. webscout/Provider/xdash.py +0 -182
  59. webscout/websx_search.py +0 -19
  60. {webscout-6.0.dist-info → webscout-6.2.dist-info}/LICENSE.md +0 -0
  61. {webscout-6.0.dist-info → webscout-6.2.dist-info}/WHEEL +0 -0
  62. {webscout-6.0.dist-info → webscout-6.2.dist-info}/entry_points.txt +0 -0
  63. {webscout-6.0.dist-info → webscout-6.2.dist-info}/top_level.txt +0 -0
@@ -64,23 +64,24 @@ class DeepSeek(Provider):
64
64
  self.timeout = timeout
65
65
  self.last_response = {}
66
66
  self.headers = {
67
- "authority": "chat.deepseek.com",
68
67
  "accept": "*/*",
69
68
  "accept-encoding": "gzip, deflate, br, zstd",
70
69
  "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
71
70
  "authorization": f"Bearer {self.api_token}",
71
+ "content-length": "128",
72
72
  "content-type": "application/json",
73
73
  "dnt": "1",
74
74
  "origin": "https://chat.deepseek.com",
75
- "referer": "https://chat.deepseek.com",
76
- "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
75
+ "priority": "u=1, i",
76
+ "referer": "https://chat.deepseek.com/",
77
+ "sec-ch-ua": '"Chromium";v="130", "Microsoft Edge";v="130", "Not?A_Brand";v="99"',
77
78
  "sec-ch-ua-mobile": "?0",
78
79
  "sec-ch-ua-platform": '"Windows"',
79
80
  "sec-fetch-dest": "empty",
80
81
  "sec-fetch-mode": "cors",
81
82
  "sec-fetch-site": "same-origin",
82
- "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
83
- "x-app-version": "20240126.0"
83
+ "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 Edg/130.0.0.0",
84
+ "x-app-version": "20241018.0"
84
85
  }
85
86
  self.__available_optimizers = (
86
87
  method
@@ -223,4 +224,4 @@ if __name__ == '__main__':
223
224
  ai = DeepSeek(api_key="", timeout=5000)
224
225
  response = ai.chat("write a poem about AI", stream=True)
225
226
  for chunk in response:
226
- print(chunk, end="", flush=True)
227
+ print(chunk, end="", flush=True)
@@ -1,4 +1,4 @@
1
- import requests
1
+ import cloudscraper
2
2
  from webscout.AIutel import Optimizers
3
3
  from webscout.AIutel import Conversation, Proxy
4
4
  from webscout.AIutel import AwesomePrompts, sanitize_stream
@@ -49,7 +49,7 @@ class Editee(Provider):
49
49
  if model not in self.AVAILABLE_MODELS:
50
50
  raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
51
51
 
52
- self.session = requests.Session()
52
+ self.session = cloudscraper.create_scraper()
53
53
  self.is_conversation = is_conversation
54
54
  self.max_tokens_to_sample = max_tokens
55
55
  self.api_endpoint = "https://editee.com/submit/chatgptfree"
@@ -188,6 +188,6 @@ class GPTWeb(Provider):
188
188
  if __name__ == '__main__':
189
189
  from rich import print
190
190
  ai = GPTWeb()
191
- response = ai.chat(input(">>> "))
191
+ response = ai.chat("tell me about Abhay koul, HelpingAI", stream=True)
192
192
  for chunk in response:
193
193
  print(chunk, end='', flush=True)
@@ -183,7 +183,7 @@ class LLAMA3(Provider):
183
183
 
184
184
  if __name__ == "__main__":
185
185
  from rich import print
186
- ai = LLAMA3(api_key='7979b01c-c5ea-40df-9198-f45733fa2208')
186
+ ai = LLAMA3(api_key='')
187
187
  response = ai.chat(input(">>> "))
188
188
  for chunks in response:
189
189
  print(chunks, end="", flush=True)
@@ -0,0 +1,200 @@
1
+ import requests
2
+ import json
3
+ import os
4
+ from typing import Any, Dict, Optional, Generator, List, Union
5
+
6
+ from webscout.AIutel import Optimizers
7
+ from webscout.AIutel import Conversation
8
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
9
+ from webscout.AIbase import Provider, AsyncProvider
10
+ from webscout import exceptions
11
+
12
+
13
+ class NinjaChat(Provider):
14
+ """
15
+ A class to interact with the NinjaChat API.
16
+ """
17
+
18
+ AVAILABLE_MODELS = {
19
+ "mistral": "https://www.ninjachat.ai/api/mistral",
20
+ "perplexity": "https://www.ninjachat.ai/api/perplexity",
21
+ "claude-3.5": "https://www.ninjachat.ai/api/claude-pro",
22
+ "gemini-1.5-pro": "https://www.ninjachat.ai/api/gemini",
23
+ "llama": "https://www.ninjachat.ai/api/llama-pro",
24
+ "o1-mini": "https://www.ninjachat.ai/api/o1-mini",
25
+ }
26
+
27
+ def __init__(
28
+ self,
29
+ is_conversation: bool = True,
30
+ max_tokens: int = 2049,
31
+ timeout: int = 30,
32
+ intro: str = None, # System message/intro prompt
33
+ filepath: str = None,
34
+ update_file: bool = True,
35
+ proxies: dict = {},
36
+ history_offset: int = 10250,
37
+ act: str = None,
38
+ model: str = "perplexity", # Default model
39
+ system_message: str = "You are a helpful AI assistant.", # Default system message
40
+ ):
41
+ """Initializes the NinjaChat API client."""
42
+
43
+ self.headers = {
44
+ "Accept": "*/*",
45
+ "Accept-Encoding": "gzip, deflate, br, zstd",
46
+ "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
47
+ "Content-Type": "application/json",
48
+ "Cookie": "_ga=GA1.1.298084589.1727859540; _ga_11N4NZX9WP=GS1.1.1727859539.1.0.1727859552.0.0.0; __stripe_mid=4f63db68-c41d-45b4-9111-2457a6cf1b538696a9; __Host-next-auth.csrf-token=a5cb5a40c73df3e808ebc072dcb116fe7dd4b9b8d39d8002ef7e54153e6aa665%7Cbffe3f934f2db43330d281453af2cd0b4757f439b958f2d1a06a36cea63e9cc8; __stripe_sid=118678d1-403a-43f9-b3b9-d80ed9392a0d2ac131; __Secure-next-auth.callback-url=https%3A%2F%2Fwww.ninjachat.ai%2Fdashboard; __Secure-next-auth.session-token=eyJhbGciOiJkaXIiLCJlbmMiOiJBMjU2R0NNIn0..l34CIFGTJCtstUqU.VjEYgaUUPpgp-49wueXFlFYvbm8csuyX0HichHrPNH45nX4s_LeZX2VhK1ZvwmUpfdlsMD4bi8VzFfQUEgs8FLPhkbKnoZDP939vobV7K_2Q9CA8PgC0oXEsQf_azWmILZ8rOE37uYzTu1evCnOjCucDYrC1ONXzl9NbGNPVa8AQr7hXvatuqtqe-lBUQXWdrw3QLulbqxvh6yLoxJj04gqC-nPudGciU-_-3TZJYr98u8o7KtUGio1ZX9vHNFfv8djWM1NCkji3Kl9eUhiyMj71.6uhUS39UcCVRa6tFzHxz2g; ph_phc_wWUtqcGWqyyochfPvwKlXMkMjIoIQKUwcnHE3KMKm8K_posthog=%7B%22distinct_id%22%3A%2201924c74-2926-7042-a1fb-5b5debdbcd1c%22%2C%22%24sesid%22%3A%5B1727966419499%2C%22019252bb-9de4-75db-9f85-a389fb401670%22%2C1727964880355%5D%7D",
49
+ "DNT": "1",
50
+ "Origin": "https://www.ninjachat.ai",
51
+ "Priority": "u=1, i",
52
+ "Referer": "https://www.ninjachat.ai/dashboard",
53
+ "Sec-CH-UA": '"Microsoft Edge";v="129", "Not=A?Brand";v="8", "Chromium";v="129"',
54
+ "Sec-CH-UA-Mobile": "?0",
55
+ "Sec-CH-UA-Platform": '"Windows"',
56
+ "Sec-Fetch-Dest": "empty",
57
+ "Sec-Fetch-Mode": "cors",
58
+ "Sec-Fetch-Site": "same-origin",
59
+ "User-Agent": (
60
+ "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
61
+ "AppleWebKit/537.36 (KHTML, like Gecko) "
62
+ "Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0"
63
+ )
64
+ }
65
+ self.session = requests.Session()
66
+ self.session.headers.update(self.headers)
67
+ self.session.proxies.update(proxies)
68
+ self.timeout = timeout
69
+ self.last_response = {}
70
+ self.system_message = system_message
71
+
72
+ self.is_conversation = is_conversation
73
+ self.max_tokens_to_sample = max_tokens
74
+ self.__available_optimizers = (
75
+ method
76
+ for method in dir(Optimizers)
77
+ if callable(getattr(Optimizers, method)) and not method.startswith("__")
78
+ )
79
+
80
+ #Set the intro/system message
81
+ Conversation.intro = (
82
+ AwesomePrompts().get_act(
83
+ act, raise_not_found=True, default=None, case_insensitive=True
84
+ )
85
+ if act
86
+ else intro or system_message or Conversation.intro #Priority: act > intro > system_message > Conversation.intro
87
+
88
+ )
89
+
90
+
91
+ self.conversation = Conversation(
92
+ is_conversation, self.max_tokens_to_sample, filepath, update_file
93
+ )
94
+ self.conversation.history_offset = history_offset
95
+
96
+ if model not in self.AVAILABLE_MODELS:
97
+ raise ValueError(f"Invalid model: {model}. Choose from: {', '.join(self.AVAILABLE_MODELS)}")
98
+ self.model_url = self.AVAILABLE_MODELS[model]
99
+ self.headers["Referer"] = self.model_url # Set initial referer
100
+
101
+
102
+
103
+ def ask(
104
+ self,
105
+ prompt: str,
106
+ stream: bool = False,
107
+ raw: bool = False,
108
+ optimizer: str = None,
109
+ conversationally: bool = False,
110
+ ) -> Union[Dict, Generator]:
111
+
112
+ conversation_prompt = self.conversation.gen_complete_prompt(prompt, intro=Conversation.intro)
113
+
114
+ if optimizer:
115
+ if optimizer in self.__available_optimizers:
116
+ conversation_prompt = getattr(Optimizers, optimizer)(
117
+ conversation_prompt if conversationally else prompt
118
+ )
119
+ else:
120
+ raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
121
+
122
+ #Include the system message in the payload
123
+ payload = {
124
+ "messages": [
125
+ {"role": "system", "content": self.system_message}, # System message here
126
+ {"role": "user", "content": conversation_prompt},
127
+ ],
128
+ "stream": stream # Now passed dynamically
129
+ }
130
+
131
+ def for_stream():
132
+ try:
133
+ with requests.post(self.model_url, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
134
+ response.raise_for_status()
135
+ streaming_text = ""
136
+ for line in response.iter_lines(decode_unicode=True):
137
+ if line:
138
+ if line.startswith("0:"):
139
+ try:
140
+ text = json.loads(line[2:]) # Extract streaming text
141
+ streaming_text += text #Accumulate for history
142
+ resp = dict(text=text)
143
+ yield resp if raw else resp
144
+ except json.JSONDecodeError:
145
+ print("\n[Error] Failed to decode JSON content.")
146
+
147
+ elif line.startswith("d:"):
148
+ break #End of stream
149
+ self.conversation.update_chat_history(prompt, streaming_text)
150
+ self.last_response.update({"text": streaming_text})
151
+ except requests.exceptions.RequestException as e:
152
+ print("An error occurred:", e)
153
+
154
+
155
+
156
+ def for_non_stream():
157
+
158
+ for _ in for_stream():
159
+ pass
160
+ return self.last_response
161
+
162
+
163
+ return for_stream() if stream else for_non_stream()
164
+
165
+ def chat(
166
+ self,
167
+ prompt: str,
168
+ stream: bool = False,
169
+ optimizer: str = None,
170
+ conversationally: bool = False,
171
+ ) -> Union[str, Generator]:
172
+
173
+ def for_stream():
174
+ for response in self.ask(
175
+ prompt, True, optimizer=optimizer, conversationally=conversationally
176
+ ):
177
+ yield self.get_message(response)
178
+
179
+ def for_non_stream():
180
+ return self.get_message(
181
+ self.ask(
182
+ prompt, False, optimizer=optimizer, conversationally=conversationally
183
+ )
184
+ )
185
+ return for_stream() if stream else for_non_stream()
186
+
187
+ def get_message(self, response: dict) -> str:
188
+ assert isinstance(response, dict), "Response should be of dict data-type only"
189
+ return response["text"]
190
+
191
+
192
+
193
+ if __name__ == "__main__":
194
+ from rich import print
195
+ bot = NinjaChat(model="perplexity", system_message="You are a creative writer.")
196
+
197
+ response = bot.chat("tell me about Abhay koul, HelpingAI ", stream=True)
198
+
199
+ for chunk in response:
200
+ print(chunk, end="", flush=True)
@@ -166,7 +166,7 @@ class OLLAMA(Provider):
166
166
  assert isinstance(response, dict), "Response should be of dict data-type only"
167
167
  return response["text"]
168
168
  if __name__ == "__main__":
169
- ai = OLLAMA(model="qwen2:0.5b")
169
+ ai = OLLAMA(model="llama3.2:1b")
170
170
  response = ai.chat("write a poem about AI", stream=True)
171
171
  for chunk in response:
172
172
  print(chunk, end="", flush=True)
@@ -592,7 +592,7 @@ class Perplexity(Provider):
592
592
  if __name__ == "__main__":
593
593
  perplexity = Perplexity()
594
594
  # Stream the response
595
- response = perplexity.chat("What is the meaning of life?")
595
+ response = perplexity.chat("tell me about Abhay koul, HelpingAI ")
596
596
  for chunk in response:
597
597
  print(chunk, end="", flush=True)
598
598
 
webscout/Provider/Reka.py CHANGED
@@ -4,10 +4,10 @@ import requests
4
4
 
5
5
  import json
6
6
 
7
- from ..AIutel import Optimizers
8
- from ..AIutel import Conversation
9
- from ..AIutel import AwesomePrompts, sanitize_stream
10
- from ..AIbase import Provider, AsyncProvider
7
+ from webscout.AIutel import Optimizers
8
+ from webscout.AIutel import Conversation
9
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
10
+ from webscout.AIbase import Provider, AsyncProvider
11
11
  from webscout import exceptions
12
12
 
13
13
  #-----------------------------------------------REKA-----------------------------------------------
@@ -205,4 +205,11 @@ class REKA(Provider):
205
205
  str: Message extracted
206
206
  """
207
207
  assert isinstance(response, dict), "Response should be of dict data-type only"
208
- return response.get("text")
208
+ return response.get("text")
209
+ if __name__ == "__main__":
210
+
211
+ from rich import print
212
+ ai = REKA(api_key="YOUR_API_KEY", timeout=5000)
213
+ response = ai.chat("write a poem about AI", stream=True)
214
+ for chunk in response:
215
+ print(chunk, end="", flush=True)
@@ -0,0 +1,103 @@
1
+ import requests
2
+ import json
3
+ import os
4
+ from datetime import datetime
5
+ from typing import List, Dict, Optional
6
+
7
+ from webscout.AIbase import ImageProvider
8
+ from webscout import exceptions
9
+
10
+
11
+ class AIUncensoredImager(ImageProvider):
12
+ """
13
+ Image provider for AIUncensored.info.
14
+ """
15
+
16
+ def __init__(self, timeout: int = 60, proxies: dict = {}):
17
+ """Initializes the AIUncensoredImager class."""
18
+ self.url = "https://twitterclone-i0wr.onrender.com/api/image"
19
+ self.headers = {
20
+ "Content-Type": "application/json",
21
+ "Accept": "*/*",
22
+ "Origin": "https://www.aiuncensored.info",
23
+ "Referer": "https://www.aiuncensored.info/",
24
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 Edg/130.0.0.0",
25
+ }
26
+ self.session = requests.Session()
27
+ self.session.headers.update(self.headers)
28
+ self.session.proxies.update(proxies)
29
+ self.timeout = timeout
30
+ self.prompt = "AI-generated image - webscout"
31
+ self.image_extension = "jpg"
32
+
33
+ def generate(self, prompt: str, amount: int = 1) -> List[str]:
34
+ """Generates image URLs from a prompt."""
35
+
36
+ assert bool(prompt), "Prompt cannot be null"
37
+ assert isinstance(amount, int) and amount > 0, "Amount must be a positive integer"
38
+ self.prompt = prompt
39
+
40
+ payload = {"prompt": prompt}
41
+ image_urls = []
42
+
43
+ try:
44
+ with self.session.post(self.url, json=payload, timeout=self.timeout) as response:
45
+ response.raise_for_status() # Raise HTTPError for bad responses
46
+ data = response.json()
47
+ image_url = data.get("image_url")
48
+
49
+ if not image_url:
50
+ raise exceptions.InvalidResponseError("No image URL in API response")
51
+
52
+ image_urls.append(image_url) # Only one image returned for now
53
+
54
+
55
+ except requests.exceptions.RequestException as e:
56
+ raise exceptions.APIConnectionError(f"Error during request: {e}")
57
+ except json.JSONDecodeError as e:
58
+ raise exceptions.InvalidResponseError(f"Invalid JSON response: {e}")
59
+ except Exception as e:
60
+ raise exceptions.FailedToGenerateResponseError(f"Unexpected error: {e}")
61
+
62
+ return image_urls
63
+
64
+ def save(
65
+ self,
66
+ response: List[str],
67
+ name: str = None,
68
+ dir: str = os.getcwd(),
69
+ filenames_prefix: str = "",
70
+ ) -> List[str]:
71
+ """Saves generated images."""
72
+
73
+ assert isinstance(response, list), f"Response should be a list, not {type(response)}"
74
+ name = self.prompt if name is None else name
75
+ os.makedirs(dir, exist_ok=True) #Create dir if needed
76
+
77
+ filenames = []
78
+ for i, img_url in enumerate(response):
79
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
80
+ filename = f"{dir}/{name}_{i + 1}_{timestamp}.{self.image_extension}"
81
+ filenames.append(filenames_prefix + os.path.basename(filename))
82
+
83
+ try:
84
+ with self.session.get(img_url, stream=True, timeout=self.timeout) as img_response:
85
+ img_response.raise_for_status()
86
+ with open(filename, "wb") as f:
87
+ for chunk in img_response.iter_content(chunk_size=8192):
88
+ f.write(chunk)
89
+ except requests.exceptions.RequestException as e:
90
+ raise exceptions.FailedToSaveImageError(f"Error downloading/saving image: {e}")
91
+
92
+ return filenames
93
+
94
+
95
+ if __name__ == "__main__":
96
+ imager = AIUncensoredImager()
97
+ prompt = "a photo of a cat sitting in a basket"
98
+ try:
99
+ image_urls = imager.generate(prompt, amount=1)
100
+ saved_filenames = imager.save(image_urls)
101
+ print(f"Saved filenames: {saved_filenames}")
102
+ except Exception as e:
103
+ print(f"An error occurred: {e}")
@@ -11,7 +11,7 @@ class NexraImager(ImageProvider):
11
11
  """Image provider for Nexra API"""
12
12
 
13
13
  AVAILABLE_MODELS = {
14
- "standard": ["emi", "stablediffusion-1.5", "stablediffusion-2.1", "sdxl-lora", "dalle", "dalle2", "dalle-mini"],
14
+ "standard": ["emi", "stablediffusion-1.5", "stablediffusion-2.1", "sdxl-lora", "dalle", "dalle2", "dalle-mini", "flux", "midjourney"],
15
15
  "prodia": [
16
16
  "dreamshaperXL10_alpha2.safetensors [c8afe2ef]",
17
17
  "dynavisionXL_0411.safetensors [c39cc051]",
@@ -37,7 +37,7 @@ class NexraImager(ImageProvider):
37
37
  self.image_extension: str = "png"
38
38
 
39
39
  def generate(
40
- self, prompt: str, model: str = "emi", amount: int = 1,
40
+ self, prompt: str, model: str = "flux", amount: int = 1,
41
41
  max_retries: int = 3, retry_delay: int = 5,
42
42
  additional_params: Optional[dict] = None
43
43
  ) -> List[bytes]:
@@ -116,5 +116,5 @@ class NexraImager(ImageProvider):
116
116
 
117
117
  if __name__ == "__main__":
118
118
  bot = NexraImager()
119
- resp_standard = bot.generate("AI-generated image - webscout", "emi", 1)
119
+ resp_standard = bot.generate("AI-generated image - webscout", "midjourney", 1)
120
120
  print(bot.save(resp_standard))
@@ -5,5 +5,7 @@ from .blackboximage import *
5
5
  from .Nexra import *
6
6
  from .huggingface import *
7
7
  from .artbit import *
8
- from .amigo import *
9
- from .WebSimAI import *
8
+ from .WebSimAI import *
9
+ from .imgninza import *
10
+ from .AIuncensored import *
11
+ from .talkai import *
@@ -8,7 +8,7 @@ from requests.exceptions import RequestException
8
8
 
9
9
  from webscout.AIbase import ImageProvider
10
10
 
11
- class AiForceimagger(ImageProvider):
11
+ class AiForceimager(ImageProvider):
12
12
  """Image provider for Airforce API"""
13
13
 
14
14
  AVAILABLE_MODELS = [
@@ -152,7 +152,7 @@ class AiForceimagger(ImageProvider):
152
152
  return filenames
153
153
 
154
154
  if __name__ == "__main__":
155
- bot = AiForceimagger()
155
+ bot = AiForceimager()
156
156
  try:
157
157
  resp = bot.generate("A shiny red sports car speeding down a scenic mountain road", 1, model="flux-realism")
158
158
  print(bot.save(resp))
@@ -0,0 +1,136 @@
1
+ import requests
2
+ import json
3
+ import os
4
+ from typing import List, Dict, Optional
5
+
6
+ from webscout.AIbase import ImageProvider
7
+ from webscout import exceptions # Import exceptions module
8
+
9
+
10
+ class NinjaImager(ImageProvider):
11
+ """
12
+ Image provider for NinjaChat.ai.
13
+ """
14
+
15
+ AVAILABLE_MODELS = {
16
+ "stable-diffusion": "https://www.ninjachat.ai/api/image-generator",
17
+ "flux-dev": "https://www.ninjachat.ai/api/flux-image-generator",
18
+ }
19
+
20
+ def __init__(self, timeout: int = 60, proxies: dict = {}):
21
+ """Initializes the NinjaChatImager class."""
22
+ self.headers = {
23
+ "Accept": "*/*",
24
+ "Accept-Encoding": "gzip, deflate, br, zstd",
25
+ "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
26
+ "Content-Type": "application/json",
27
+ "DNT": "1",
28
+ "Origin": "https://www.ninjachat.ai",
29
+ "Priority": "u=1, i",
30
+ "Referer": "https://www.ninjachat.ai/image-generation",
31
+ "Sec-CH-UA": '"Microsoft Edge";v="129", "Not=A?Brand";v="8", "Chromium";v="129"',
32
+ "Sec-CH-UA-Mobile": "?0",
33
+ "Sec-CH-UA-Platform": '"Windows"',
34
+ "Sec-Fetch-Dest": "empty",
35
+ "Sec-Fetch-Mode": "cors",
36
+ "Sec-Fetch-Site": "same-origin",
37
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0",
38
+ }
39
+ self.session = requests.Session()
40
+ self.session.headers.update(self.headers)
41
+ self.session.proxies.update(proxies)
42
+ self.timeout = timeout
43
+ self.prompt = "AI-generated image - webscout"
44
+ self.image_extension = "png" # Default extension
45
+
46
+ def generate(self, prompt: str, amount: int = 1, model: str = "flux-dev") -> List[str]:
47
+ """Generate images from a prompt."""
48
+
49
+ assert bool(prompt), "Prompt cannot be null"
50
+ assert isinstance(amount, int) and amount > 0, "Amount should be a positive integer"
51
+
52
+ if model not in self.AVAILABLE_MODELS:
53
+ raise exceptions.ModelNotFoundError(f"Model '{model}' not found. Available models: {', '.join(self.AVAILABLE_MODELS)}")
54
+
55
+ self.prompt = prompt # Store the prompt
56
+ url = self.AVAILABLE_MODELS[model]
57
+
58
+ payload = {
59
+ "prompt": prompt,
60
+ "model": model if model == "flux-dev" else "stable-diffusion", # Pass model name to API if needed
61
+ "negativePrompt": "", #Use negative prompt from API's data structure
62
+ "cfg": 7,
63
+ "aspectRatio": "1:1",
64
+ "outputFormat": self.image_extension,
65
+ "numOutputs": amount,
66
+ "outputQuality": 90
67
+ }
68
+
69
+
70
+ image_urls = []
71
+ try:
72
+ with requests.post(url, headers=self.headers, json=payload, timeout=self.timeout) as response:
73
+ if response.status_code != 200:
74
+ raise exceptions.FailedToGenerateResponseError(f"Request failed with status code: {response.status_code}, {response.text}") # Raise Webscout exception
75
+
76
+ data = response.json()
77
+
78
+ if 'output' not in data:
79
+ raise exceptions.InvalidResponseError("Invalid API response format: 'output' key missing.")
80
+
81
+ for img_url in data['output']:
82
+ image_urls.append(img_url)
83
+
84
+ except requests.exceptions.RequestException as e:
85
+ raise exceptions.APIConnectionError(f"An error occurred during the request: {e}") # More specific exception
86
+ except json.JSONDecodeError as e:
87
+ raise exceptions.InvalidResponseError(f"Failed to parse JSON response: {e}")
88
+
89
+ return image_urls
90
+
91
+
92
+ def save(
93
+ self,
94
+ response: List[str],
95
+ name: str = None,
96
+ dir: str = os.getcwd(),
97
+ filenames_prefix: str = "",
98
+ ) -> List[str]:
99
+ """Saves generated images."""
100
+
101
+ assert isinstance(response, list), f"Response should be a list, not {type(response)}"
102
+ name = self.prompt if name is None else name
103
+
104
+ filenames = []
105
+ count = 0
106
+ for img_url in response:
107
+ def complete_path():
108
+ count_value = "" if count == 0 else f"_{count}"
109
+ return os.path.join(dir, name + count_value + "." + self.image_extension)
110
+
111
+ while os.path.isfile(complete_path()):
112
+ count += 1
113
+
114
+ absolute_path_to_file = complete_path()
115
+ filenames.append(filenames_prefix + os.path.split(absolute_path_to_file)[1])
116
+ try:
117
+ with requests.get(img_url, stream=True, timeout=self.timeout) as img_response:
118
+ img_response.raise_for_status()
119
+ with open(absolute_path_to_file, "wb") as f:
120
+ for chunk in img_response.iter_content(chunk_size=8192):
121
+ f.write(chunk)
122
+
123
+ except requests.exceptions.RequestException as e:
124
+ raise exceptions.FailedToSaveImageError(f"An error occurred while downloading/saving image: {e}")
125
+
126
+ return filenames
127
+
128
+
129
+
130
+ if __name__ == "__main__":
131
+ bot = NinjaImager()
132
+ try:
133
+ resp = bot.generate("A shiny red sports car speeding down a scenic mountain road", 1)
134
+ print(bot.save(resp))
135
+ except Exception as e:
136
+ print(f"An error occurred: {e}")