webscout 8.3.5__py3-none-any.whl → 8.3.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; consult the registry's security advisory for more details.

Files changed (159)
  1. webscout/AIutel.py +2 -0
  2. webscout/Bard.py +12 -6
  3. webscout/DWEBS.py +66 -57
  4. webscout/Provider/{UNFINISHED → AISEARCH}/PERPLEXED_search.py +34 -74
  5. webscout/Provider/AISEARCH/__init__.py +18 -11
  6. webscout/Provider/AISEARCH/scira_search.py +3 -1
  7. webscout/Provider/Aitopia.py +2 -3
  8. webscout/Provider/Andi.py +3 -3
  9. webscout/Provider/ChatGPTClone.py +1 -1
  10. webscout/Provider/ChatSandbox.py +1 -0
  11. webscout/Provider/Cloudflare.py +1 -1
  12. webscout/Provider/Cohere.py +1 -0
  13. webscout/Provider/Deepinfra.py +13 -10
  14. webscout/Provider/ExaAI.py +1 -1
  15. webscout/Provider/ExaChat.py +1 -80
  16. webscout/Provider/Flowith.py +6 -1
  17. webscout/Provider/Gemini.py +7 -5
  18. webscout/Provider/GeminiProxy.py +1 -0
  19. webscout/Provider/GithubChat.py +4 -1
  20. webscout/Provider/Groq.py +1 -1
  21. webscout/Provider/HeckAI.py +8 -4
  22. webscout/Provider/Jadve.py +23 -38
  23. webscout/Provider/K2Think.py +308 -0
  24. webscout/Provider/Koboldai.py +8 -186
  25. webscout/Provider/LambdaChat.py +2 -4
  26. webscout/Provider/Nemotron.py +3 -4
  27. webscout/Provider/Netwrck.py +6 -8
  28. webscout/Provider/OLLAMA.py +1 -0
  29. webscout/Provider/OPENAI/Cloudflare.py +6 -7
  30. webscout/Provider/OPENAI/FalconH1.py +2 -7
  31. webscout/Provider/OPENAI/FreeGemini.py +6 -8
  32. webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
  33. webscout/Provider/OPENAI/NEMOTRON.py +3 -6
  34. webscout/Provider/OPENAI/PI.py +5 -4
  35. webscout/Provider/OPENAI/Qwen3.py +2 -3
  36. webscout/Provider/OPENAI/README.md +2 -1
  37. webscout/Provider/OPENAI/TogetherAI.py +52 -57
  38. webscout/Provider/OPENAI/TwoAI.py +3 -4
  39. webscout/Provider/OPENAI/__init__.py +17 -56
  40. webscout/Provider/OPENAI/ai4chat.py +313 -303
  41. webscout/Provider/OPENAI/base.py +9 -29
  42. webscout/Provider/OPENAI/chatgpt.py +7 -2
  43. webscout/Provider/OPENAI/chatgptclone.py +4 -7
  44. webscout/Provider/OPENAI/chatsandbox.py +84 -59
  45. webscout/Provider/OPENAI/deepinfra.py +12 -6
  46. webscout/Provider/OPENAI/e2b.py +60 -8
  47. webscout/Provider/OPENAI/flowith.py +4 -3
  48. webscout/Provider/OPENAI/generate_api_key.py +48 -0
  49. webscout/Provider/OPENAI/heckai.py +4 -1
  50. webscout/Provider/OPENAI/netwrck.py +9 -12
  51. webscout/Provider/OPENAI/refact.py +274 -0
  52. webscout/Provider/OPENAI/scirachat.py +6 -0
  53. webscout/Provider/OPENAI/textpollinations.py +3 -14
  54. webscout/Provider/OPENAI/toolbaz.py +14 -10
  55. webscout/Provider/OpenGPT.py +1 -1
  56. webscout/Provider/Openai.py +150 -402
  57. webscout/Provider/PI.py +1 -0
  58. webscout/Provider/Perplexitylabs.py +1 -2
  59. webscout/Provider/QwenLM.py +107 -89
  60. webscout/Provider/STT/__init__.py +17 -2
  61. webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
  62. webscout/Provider/StandardInput.py +1 -1
  63. webscout/Provider/TTI/__init__.py +18 -12
  64. webscout/Provider/TTI/bing.py +14 -2
  65. webscout/Provider/TTI/together.py +10 -9
  66. webscout/Provider/TTS/README.md +0 -1
  67. webscout/Provider/TTS/__init__.py +18 -11
  68. webscout/Provider/TTS/base.py +479 -159
  69. webscout/Provider/TTS/deepgram.py +409 -156
  70. webscout/Provider/TTS/elevenlabs.py +425 -111
  71. webscout/Provider/TTS/freetts.py +317 -140
  72. webscout/Provider/TTS/gesserit.py +192 -128
  73. webscout/Provider/TTS/murfai.py +248 -113
  74. webscout/Provider/TTS/openai_fm.py +347 -129
  75. webscout/Provider/TTS/speechma.py +620 -586
  76. webscout/Provider/TeachAnything.py +1 -0
  77. webscout/Provider/TextPollinationsAI.py +5 -15
  78. webscout/Provider/TogetherAI.py +136 -142
  79. webscout/Provider/TwoAI.py +53 -309
  80. webscout/Provider/TypliAI.py +2 -1
  81. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
  82. webscout/Provider/UNFINISHED/VercelAIGateway.py +339 -0
  83. webscout/Provider/Venice.py +2 -1
  84. webscout/Provider/VercelAI.py +1 -0
  85. webscout/Provider/WiseCat.py +2 -1
  86. webscout/Provider/WrDoChat.py +2 -1
  87. webscout/Provider/__init__.py +18 -174
  88. webscout/Provider/ai4chat.py +1 -1
  89. webscout/Provider/akashgpt.py +7 -10
  90. webscout/Provider/cerebras.py +194 -38
  91. webscout/Provider/chatglm.py +170 -83
  92. webscout/Provider/cleeai.py +1 -2
  93. webscout/Provider/deepseek_assistant.py +1 -1
  94. webscout/Provider/elmo.py +1 -1
  95. webscout/Provider/geminiapi.py +1 -1
  96. webscout/Provider/granite.py +1 -1
  97. webscout/Provider/hermes.py +1 -3
  98. webscout/Provider/julius.py +1 -0
  99. webscout/Provider/learnfastai.py +1 -1
  100. webscout/Provider/llama3mitril.py +1 -1
  101. webscout/Provider/llmchat.py +1 -1
  102. webscout/Provider/llmchatco.py +1 -1
  103. webscout/Provider/meta.py +3 -3
  104. webscout/Provider/oivscode.py +2 -2
  105. webscout/Provider/scira_chat.py +51 -124
  106. webscout/Provider/searchchat.py +1 -0
  107. webscout/Provider/sonus.py +1 -1
  108. webscout/Provider/toolbaz.py +15 -11
  109. webscout/Provider/turboseek.py +31 -22
  110. webscout/Provider/typefully.py +2 -1
  111. webscout/Provider/x0gpt.py +1 -0
  112. webscout/Provider/yep.py +2 -1
  113. webscout/conversation.py +22 -20
  114. webscout/sanitize.py +14 -10
  115. webscout/scout/README.md +20 -23
  116. webscout/scout/core/crawler.py +125 -38
  117. webscout/scout/core/scout.py +26 -5
  118. webscout/tempid.py +6 -0
  119. webscout/version.py +1 -1
  120. webscout/webscout_search.py +13 -6
  121. webscout/webscout_search_async.py +10 -8
  122. webscout/yep_search.py +13 -5
  123. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/METADATA +3 -1
  124. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/RECORD +132 -155
  125. webscout/Provider/AllenAI.py +0 -440
  126. webscout/Provider/Blackboxai.py +0 -793
  127. webscout/Provider/FreeGemini.py +0 -250
  128. webscout/Provider/Glider.py +0 -225
  129. webscout/Provider/Hunyuan.py +0 -283
  130. webscout/Provider/MCPCore.py +0 -322
  131. webscout/Provider/MiniMax.py +0 -207
  132. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
  133. webscout/Provider/OPENAI/MiniMax.py +0 -298
  134. webscout/Provider/OPENAI/autoproxy.py +0 -1067
  135. webscout/Provider/OPENAI/c4ai.py +0 -394
  136. webscout/Provider/OPENAI/copilot.py +0 -305
  137. webscout/Provider/OPENAI/glider.py +0 -330
  138. webscout/Provider/OPENAI/mcpcore.py +0 -431
  139. webscout/Provider/OPENAI/multichat.py +0 -378
  140. webscout/Provider/Reka.py +0 -214
  141. webscout/Provider/TTS/sthir.py +0 -94
  142. webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
  143. webscout/Provider/asksteve.py +0 -220
  144. webscout/Provider/copilot.py +0 -422
  145. webscout/Provider/freeaichat.py +0 -294
  146. webscout/Provider/koala.py +0 -182
  147. webscout/Provider/lmarena.py +0 -198
  148. webscout/Provider/monochat.py +0 -275
  149. webscout/Provider/multichat.py +0 -375
  150. webscout/Provider/scnet.py +0 -244
  151. webscout/Provider/talkai.py +0 -194
  152. /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
  153. /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
  154. /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
  155. /webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +0 -0
  156. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
  157. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
  158. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
  159. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
@@ -1,7 +1,9 @@
1
1
  import json
2
2
  from typing import Union, Any, Dict, Generator, Optional
3
+ import uuid
4
+ import time
3
5
 
4
- import cloudscraper
6
+ from curl_cffi import Session
5
7
 
6
8
  from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
7
9
  from webscout.AIbase import Provider
@@ -11,17 +13,24 @@ class QwenLM(Provider):
11
13
  """
12
14
  A class to interact with the QwenLM API
13
15
  """
14
-
16
+ required_auth = True
15
17
  AVAILABLE_MODELS = [
18
+ "qwen-plus-2025-09-11",
19
+ "qwen3-max-preview",
20
+ "qwen3-235b-a22b",
21
+ "qwen3-coder-plus",
22
+ "qwen3-30b-a3b",
23
+ "qwen3-coder-30b-a3b-instruct",
16
24
  "qwen-max-latest",
17
- "qwen-plus-latest",
18
- "qwen2.5-14b-instruct-1m",
25
+ "qwen-plus-2025-01-25",
19
26
  "qwq-32b",
27
+ "qwen-turbo-2025-02-11",
28
+ "qwen2.5-omni-7b",
29
+ "qvq-72b-preview-0310",
30
+ "qwen2.5-vl-32b-instruct",
31
+ "qwen2.5-14b-instruct-1m",
20
32
  "qwen2.5-coder-32b-instruct",
21
- "qwen-turbo-latest",
22
- "qwen2.5-72b-instruct",
23
- "qwen2.5-vl-72b-instruct",
24
- "qvq-72b-preview"
33
+ "qwen2.5-72b-instruct"
25
34
  ]
26
35
 
27
36
  def __init__(
@@ -36,7 +45,7 @@ class QwenLM(Provider):
36
45
  proxies: dict = {},
37
46
  history_offset: int = 10250,
38
47
  act: Optional[str] = None,
39
- model: str = "qwen-plus-latest",
48
+ model: str = "qwen-plus-2025-09-11",
40
49
  system_prompt: str = "You are a helpful AI assistant."
41
50
  ):
42
51
  """Initializes the QwenLM API client."""
@@ -45,36 +54,38 @@ class QwenLM(Provider):
45
54
  f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}"
46
55
  )
47
56
 
48
- self.session = cloudscraper.create_scraper()
57
+ self.session = Session(impersonate="chrome")
49
58
  self.is_conversation = is_conversation
50
59
  self.max_tokens_to_sample = max_tokens
51
- self.api_endpoint = "https://chat.qwenlm.ai/api/chat/completions"
60
+ self.api_endpoint = "https://chat.qwen.ai/api/chat/completions"
52
61
  self.stream_chunk_size = 64
53
62
  self.timeout = timeout
54
63
  self.last_response = {}
55
64
  self.model = model
56
65
  self.system_prompt = system_prompt
57
66
  self.cookies_path = cookies_path
58
- self.cookie_string, self.token = self._load_cookies()
67
+ self.cookies_dict, self.token = self._load_cookies()
68
+ self.chat_id = str(uuid.uuid4())
59
69
 
60
70
  self.headers = {
61
- "accept": "*/*",
62
- "accept-language": "en-US,en;q=0.9",
63
- "content-type": "application/json",
64
- "origin": "https://chat.qwenlm.ai",
65
- "referer": "https://chat.qwenlm.ai/",
66
- "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
71
+ "Accept": "*/*",
72
+ "Accept-Language": "en-US,en;q=0.9",
73
+ "Cache-Control": "no-cache",
74
+ "Connection": "keep-alive",
75
+ "DNT": "1",
76
+ "Origin": "https://chat.qwen.ai",
77
+ "Pragma": "no-cache",
78
+ "Referer": f"https://chat.qwen.ai/c/{self.chat_id}",
79
+ "Sec-Fetch-Dest": "empty",
80
+ "Sec-Fetch-Mode": "cors",
81
+ "Sec-Fetch-Site": "same-origin",
82
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36",
67
83
  "authorization": f"Bearer {self.token}" if self.token else '',
68
84
  }
69
85
  self.session.headers.update(self.headers)
86
+ self.session.cookies.update(self.cookies_dict)
70
87
  self.session.proxies = proxies
71
88
  self.chat_type = "t2t" # search - used WEB, t2t - chatbot, t2i - image_gen
72
- if self.chat_type != "t2t":
73
- AVAILABLE_MODELS = [
74
- 'qwen-plus-latest', 'qvq-72b-preview',
75
- 'qvq-32b', 'qwen-turbo-latest',
76
- 'qwen-max-latest'
77
- ]
78
89
 
79
90
  self.__available_optimizers = (
80
91
  method
@@ -94,19 +105,14 @@ class QwenLM(Provider):
94
105
  )
95
106
  self.conversation.history_offset = history_offset
96
107
 
97
- def _load_cookies(self) -> tuple[str, str]:
98
- """Load cookies from a JSON file and build a cookie header string."""
108
+ def _load_cookies(self) -> tuple[dict, str]:
109
+ """Load cookies from a JSON file and build a cookie dict."""
99
110
  try:
100
111
  with open(self.cookies_path, "r") as f:
101
112
  cookies = json.load(f)
102
- cookie_string = "; ".join(
103
- f"{cookie['name']}={cookie['value']}" for cookie in cookies
104
- )
105
- token = next(
106
- (cookie.get("value") for cookie in cookies if cookie.get("name") == "token"),
107
- "",
108
- )
109
- return cookie_string, token
113
+ cookies_dict = {cookie['name']: cookie['value'] for cookie in cookies}
114
+ token = cookies_dict.get("token", "")
115
+ return cookies_dict, token
110
116
  except FileNotFoundError:
111
117
  raise exceptions.InvalidAuthenticationError(
112
118
  "Error: cookies.json file not found!"
@@ -138,14 +144,22 @@ class QwenLM(Provider):
138
144
  )
139
145
 
140
146
  payload = {
141
- 'chat_type': self.chat_type,
147
+ 'stream': stream,
148
+ 'incremental_output': False,
149
+ "chat_type": "t2t",
150
+ "model": self.model,
142
151
  "messages": [
143
- {"role": "system", "content": self.system_prompt},
144
- {"role": "user", "content": conversation_prompt}
152
+ {
153
+ "role": "user",
154
+ "content": conversation_prompt,
155
+ "chat_type": "t2t",
156
+ "extra": {},
157
+ "feature_config": {"thinking_enabled": False},
158
+ }
145
159
  ],
146
- "model": self.model,
147
- "stream": stream,
148
- "max_tokens": self.max_tokens_to_sample
160
+ "session_id": str(uuid.uuid4()),
161
+ "chat_id": str(uuid.uuid4()),
162
+ "id": str(uuid.uuid4()),
149
163
  }
150
164
 
151
165
  def for_stream() -> Generator[Dict[str, Any], None, None]:
@@ -158,34 +172,29 @@ class QwenLM(Provider):
158
172
  )
159
173
 
160
174
  cumulative_text = ""
161
- for line in response.iter_lines(decode_unicode=True):
162
- if line and line.startswith("data: "):
163
- data = line[6:]
164
- if data == "[DONE]":
165
- break
166
- try:
167
- json_data = json.loads(data)
168
- # Handle multiple response formats
169
- if "choices" in json_data:
170
- new_content = json_data.get("choices")[0].get("delta", {}).get("content", "")
171
- elif "messages" in json_data:
172
- assistant_msg = next(
173
- (msg for msg in reversed(json_data["messages"]) if msg.get("role") == "assistant"),
174
- {}
175
- )
176
- content_field = assistant_msg.get("content", "")
177
- if isinstance(content_field, list):
178
- new_content = "".join(item.get("text", "") for item in content_field)
179
- else:
180
- new_content = content_field
181
- else:
182
- new_content = ""
183
- delta = new_content[len(cumulative_text):]
184
- cumulative_text = new_content
185
- if delta:
186
- yield delta if raw else {"text": delta}
187
- except json.JSONDecodeError:
188
- continue
175
+ for line in response.iter_lines(decode_unicode=False):
176
+ if line:
177
+ line = line.decode('utf-8') if isinstance(line, bytes) else line
178
+ if line.startswith("data: "):
179
+ data = line[6:]
180
+ if data == "[DONE]":
181
+ break
182
+ try:
183
+ json_data = json.loads(data)
184
+ if "response.created" in json_data:
185
+ # Initial response, can ignore or use for chat_id etc.
186
+ continue
187
+ if "choices" in json_data:
188
+ delta = json_data["choices"][0]["delta"]
189
+ new_content = delta.get("content", "")
190
+ status = delta.get("status", "")
191
+ if status == "finished":
192
+ break
193
+ cumulative_text += new_content
194
+ if new_content:
195
+ yield delta if raw else {"text": new_content}
196
+ except json.JSONDecodeError:
197
+ continue
189
198
  self.last_response.update(dict(text=cumulative_text))
190
199
  self.conversation.update_chat_history(
191
200
  prompt, self.get_message(self.last_response)
@@ -193,29 +202,35 @@ class QwenLM(Provider):
193
202
 
194
203
  def for_non_stream() -> Dict[str, Any]:
195
204
  """
196
- Handles non-streaming responses by aggregating all streamed chunks into a single string.
205
+ Handles non-streaming responses by making a non-streaming request.
197
206
  """
198
207
 
199
- # Initialize an empty string to accumulate the full response
200
- full_response = ""
208
+ # Create a non-streaming payload
209
+ non_stream_payload = payload.copy()
210
+ non_stream_payload['stream'] = False
211
+ non_stream_payload['incremental_output'] = False
201
212
 
202
- # Iterate through the stream generator and accumulate the text
203
- try:
204
- for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
205
- if isinstance(response, dict): # Check if the response is a dictionary
206
- full_response += response.get("text", "") # Extract and append the "text" field
207
- elif isinstance(response, str): # If the response is a string, directly append it
208
- full_response += response
209
- except Exception as e:
210
- raise
213
+ response = self.session.post(
214
+ self.api_endpoint, json=non_stream_payload, headers=self.headers, stream=False, timeout=self.timeout
215
+ )
216
+ if not response.ok:
217
+ raise exceptions.FailedToGenerateResponseError(
218
+ f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
219
+ )
211
220
 
212
- # Ensure last_response is updated with the aggregated text
213
- self.last_response.update({"text": full_response})
221
+ result = response.json()
222
+ assistant_reply = (
223
+ result.get("choices", [{}])[0]
224
+ .get("message", {})
225
+ .get("content", "")
226
+ )
214
227
 
215
- # Update conversation history with the final response
216
- self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
228
+ self.last_response.update({"text": assistant_reply})
229
+ self.conversation.update_chat_history(
230
+ prompt, self.get_message(self.last_response)
231
+ )
217
232
 
218
- return {"text": full_response} # Return the dictionary containing the full response
233
+ return {"text": assistant_reply}
219
234
 
220
235
  return for_stream() if stream else for_non_stream()
221
236
 
@@ -246,9 +261,12 @@ class QwenLM(Provider):
246
261
 
247
262
  if __name__ == "__main__":
248
263
  from rich import print
249
- ai = QwenLM(cookies_path="cookies.json")
250
- response = ai.chat(input(">>> "), stream=False)
251
- ai.chat_type = "search" # search - used WEB, t2t - chatbot, t2i - image_gen
252
- print(response)
264
+ cookies_path = r"C:\Users\koula\Desktop\Webscout\cookies.json"
265
+ for model in QwenLM.AVAILABLE_MODELS:
266
+ ai = QwenLM(cookies_path=cookies_path, model=model)
267
+ response = ai.chat("hi")
268
+ print(f"Model: {model}")
269
+ print(response)
270
+ print("-" * 50)
253
271
  # for chunk in response:
254
272
  # print(chunk, end="", flush=True)
@@ -1,3 +1,18 @@
1
1
  # This file marks the directory as a Python package.
2
- from .base import *
3
- from .elevenlabs import *
2
+
3
+ import os
4
+ import importlib
5
+ from pathlib import Path
6
+
7
+ # Get current directory
8
+ current_dir = Path(__file__).parent
9
+
10
+ # Auto-import all .py files (except __init__.py)
11
+ for file_path in current_dir.glob("*.py"):
12
+ if file_path.name != "__init__.py":
13
+ module_name = file_path.stem
14
+ try:
15
+ module = importlib.import_module(f".{module_name}", package=__name__)
16
+ globals().update(vars(module))
17
+ except ImportError:
18
+ pass # Skip files that can't be imported
@@ -13,19 +13,18 @@ class Sambanova(Provider):
13
13
  """
14
14
  A class to interact with the Sambanova API.
15
15
  """
16
-
16
+ required_auth = True
17
17
  AVAILABLE_MODELS = [
18
- "Meta-Llama-3.1-8B-Instruct",
19
- "Meta-Llama-3.1-70B-Instruct",
20
- "Meta-Llama-3.1-405B-Instruct",
18
+ "DeepSeek-R1-0528",
21
19
  "DeepSeek-R1-Distill-Llama-70B",
22
- "Llama-3.1-Tulu-3-405B",
23
- "Meta-Llama-3.2-1B-Instruct",
24
- "Meta-Llama-3.2-3B-Instruct",
20
+ "DeepSeek-V3.1",
21
+ "gpt-oss-120b",
22
+ "Qwen3-32B",
23
+ "DeepSeek-V3-0324",
24
+ "Meta-Llama-3.1-8B-Instruct",
25
25
  "Meta-Llama-3.3-70B-Instruct",
26
- "Qwen2.5-72B-Instruct",
27
- "Qwen2.5-Coder-32B-Instruct",
28
- "QwQ-32B-Preview"
26
+ "Llama-3.3-Swallow-70B-Instruct-v0.4",
27
+ "Llama-4-Maverick-17B-128E-Instruct"
29
28
  ]
30
29
 
31
30
  def __init__(
@@ -13,7 +13,7 @@ class StandardInputAI(Provider):
13
13
  """
14
14
  A class to interact with the Standard Input chat API.
15
15
  """
16
-
16
+ required_auth = False
17
17
  AVAILABLE_MODELS = {
18
18
  "standard-quick": "quick",
19
19
  "standard-reasoning": "quick", # Same model but with reasoning enabled
@@ -1,12 +1,18 @@
1
- from .pollinations import *
2
- from .piclumen import *
3
- from .magicstudio import *
4
- from .pixelmuse import *
5
- from .aiarta import *
6
- from .gpt1image import *
7
- from .imagen import *
8
- from .together import *
9
- from .bing import *
10
- from .infip import *
11
- from .monochat import *
12
- from .venice import *
1
+ # This file marks the directory as a Python package.
2
+
3
+ import os
4
+ import importlib
5
+ from pathlib import Path
6
+
7
+ # Get current directory
8
+ current_dir = Path(__file__).parent
9
+
10
+ # Auto-import all .py files (except __init__.py)
11
+ for file_path in current_dir.glob("*.py"):
12
+ if file_path.name != "__init__.py":
13
+ module_name = file_path.stem
14
+ try:
15
+ module = importlib.import_module(f".{module_name}", package=__name__)
16
+ globals().update(vars(module))
17
+ except ImportError:
18
+ pass # Skip files that can't be imported
@@ -20,7 +20,7 @@ class Images(BaseImages):
20
20
  def create(
21
21
  self,
22
22
  *,
23
- model: str = "bing",
23
+ model: str = "dalle",
24
24
  prompt: str,
25
25
  n: int = 1,
26
26
  size: str = "1024x1024",
@@ -42,10 +42,21 @@ class Images(BaseImages):
42
42
  headers = self._client.headers
43
43
  images = []
44
44
  urls = []
45
+
46
+ # Map model names to Bing model codes
47
+ model_mapping = {
48
+ "dalle": "0",
49
+ "gpt4o": "1",
50
+ }
51
+
52
+ # Get the appropriate model code
53
+ model_code = model_mapping.get(model.lower(), "4")
54
+
45
55
  for _ in range(n):
46
56
  data = {
47
57
  "q": prompt,
48
58
  "rt": "4",
59
+ "mdl": model_code,
49
60
  "FORM": "GENCRE"
50
61
  }
51
62
  response = session.post(
@@ -221,8 +232,9 @@ class BingImageAI(TTICompatibleProvider):
221
232
 
222
233
  if __name__ == "__main__":
223
234
  from rich import print
224
- client = BingImageAI(cookie="1pkdvumH1SEjFkDjFymRYKouIRoXZlh_p5RTfAttx4DaaNOSDyz8qFP2M7LbZ93fbl4f6Xm8fTGwXHNDB648Gom5jfnTU_Iz-VH47l0HTYJDS1sItbBBS-sqSISFgXR62SoqnW5eX5MFht-j2uB1gZ4uDnpR_60fLRTCdW1SIRegDvnBm1TGhRiZsi6wUPyzwFg7-PsXAs3Fq9iV9m-0FEw")
235
+ client = BingImageAI(cookie="1QyBY4Z1eHBW6fbI25kdM5TrlRGWzn5PFySapCOfvvz04zaounFG660EipVJSOXXvcdeXXLwsWHdDI8bNymucF_QnMHSlY1mc0pPI7e9Ar6o-_7e9Ik5QOe1nkJIe5vz22pibioTqx0IfVKwmVbX22A3bFD7ODaSZalKFr-AuxgAaRVod-giTTry6Ei7RVgisF7BHlkMPPwtCeO234ujgug")
225
236
  response = client.images.create(
237
+ model="gpt4o",
226
238
  prompt="A cat riding a bicycle",
227
239
  response_format="url",
228
240
  n=4,
@@ -202,24 +202,25 @@ class Images(BaseImages):
202
202
  class TogetherImage(TTICompatibleProvider):
203
203
  """
204
204
  Together.xyz Text-to-Image provider
205
- Updated: 2025-06-02 10:42:41 UTC by OEvortex
205
+ Updated: 2025-08-01 10:42:41 UTC by OEvortex
206
206
  Supports FLUX and other image generation models
207
207
  """
208
208
 
209
209
  # Image models from Together.xyz API (filtered for image type only)
210
210
  AVAILABLE_MODELS = [
211
- "black-forest-labs/FLUX.1-pro",
212
- "black-forest-labs/FLUX.1.1-pro",
213
- "black-forest-labs/FLUX.1-redux",
214
- "black-forest-labs/FLUX.1-dev-lora",
215
- "black-forest-labs/FLUX.1-schnell",
211
+ "black-forest-labs/FLUX.1-canny",
216
212
  "black-forest-labs/FLUX.1-depth",
217
- "black-forest-labs/FLUX.1-kontext-dev",
218
213
  "black-forest-labs/FLUX.1-dev",
219
- "black-forest-labs/FLUX.1-canny",
214
+ "black-forest-labs/FLUX.1-dev-lora",
215
+ "black-forest-labs/FLUX.1-kontext-dev",
220
216
  "black-forest-labs/FLUX.1-kontext-max",
217
+ "black-forest-labs/FLUX.1-kontext-pro",
218
+ "black-forest-labs/FLUX.1-krea-dev",
219
+ "black-forest-labs/FLUX.1-pro",
220
+ "black-forest-labs/FLUX.1-redux",
221
+ "black-forest-labs/FLUX.1-schnell",
221
222
  "black-forest-labs/FLUX.1-schnell-Free",
222
- "black-forest-labs/FLUX.1-kontext-pro"
223
+ "black-forest-labs/FLUX.1.1-pro"
223
224
  ]
224
225
 
225
226
  def __init__(self):
@@ -24,7 +24,6 @@ Webscout's TTS Providers offer a versatile and powerful text-to-speech conversio
24
24
  5. **DeepgramTTS**
25
25
  6. **StreamElementsTTS**
26
26
  7. **SpeechMaTTS**
27
- 8. **SthirTTS**
28
27
  9. **FreeTTS**
29
28
  ## 🚀 Installation
30
29
 
@@ -1,11 +1,18 @@
1
- from .base import BaseTTSProvider, AsyncBaseTTSProvider
2
- from .streamElements import *
3
- from .parler import *
4
- from .deepgram import *
5
- from .elevenlabs import *
6
- from .murfai import *
7
- from .gesserit import *
8
- from .speechma import *
9
- from .sthir import *
10
- from .openai_fm import *
11
- from .freetts import *
1
+ # This file marks the directory as a Python package.
2
+
3
+ import os
4
+ import importlib
5
+ from pathlib import Path
6
+
7
+ # Get current directory
8
+ current_dir = Path(__file__).parent
9
+
10
+ # Auto-import all .py files (except __init__.py)
11
+ for file_path in current_dir.glob("*.py"):
12
+ if file_path.name != "__init__.py":
13
+ module_name = file_path.stem
14
+ try:
15
+ module = importlib.import_module(f".{module_name}", package=__name__)
16
+ globals().update(vars(module))
17
+ except ImportError:
18
+ pass # Skip files that can't be imported