webscout-8.3.6-py3-none-any.whl → webscout-8.3.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release has been flagged as potentially problematic.
Files changed (130)
  1. webscout/AIutel.py +2 -0
  2. webscout/Provider/AISEARCH/__init__.py +18 -11
  3. webscout/Provider/AISEARCH/scira_search.py +3 -1
  4. webscout/Provider/Aitopia.py +2 -3
  5. webscout/Provider/Andi.py +3 -3
  6. webscout/Provider/ChatGPTClone.py +1 -1
  7. webscout/Provider/ChatSandbox.py +1 -0
  8. webscout/Provider/Cloudflare.py +1 -1
  9. webscout/Provider/Cohere.py +1 -0
  10. webscout/Provider/Deepinfra.py +7 -10
  11. webscout/Provider/ExaAI.py +1 -1
  12. webscout/Provider/ExaChat.py +1 -80
  13. webscout/Provider/Flowith.py +1 -1
  14. webscout/Provider/Gemini.py +7 -5
  15. webscout/Provider/GeminiProxy.py +1 -0
  16. webscout/Provider/GithubChat.py +3 -1
  17. webscout/Provider/Groq.py +1 -1
  18. webscout/Provider/HeckAI.py +8 -4
  19. webscout/Provider/Jadve.py +23 -38
  20. webscout/Provider/K2Think.py +308 -0
  21. webscout/Provider/Koboldai.py +8 -186
  22. webscout/Provider/LambdaChat.py +2 -4
  23. webscout/Provider/Nemotron.py +3 -4
  24. webscout/Provider/Netwrck.py +3 -2
  25. webscout/Provider/OLLAMA.py +1 -0
  26. webscout/Provider/OPENAI/Cloudflare.py +6 -7
  27. webscout/Provider/OPENAI/FalconH1.py +2 -7
  28. webscout/Provider/OPENAI/FreeGemini.py +6 -8
  29. webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
  30. webscout/Provider/OPENAI/NEMOTRON.py +3 -6
  31. webscout/Provider/OPENAI/PI.py +5 -4
  32. webscout/Provider/OPENAI/Qwen3.py +2 -3
  33. webscout/Provider/OPENAI/TogetherAI.py +2 -2
  34. webscout/Provider/OPENAI/TwoAI.py +3 -4
  35. webscout/Provider/OPENAI/__init__.py +17 -58
  36. webscout/Provider/OPENAI/ai4chat.py +313 -303
  37. webscout/Provider/OPENAI/base.py +9 -29
  38. webscout/Provider/OPENAI/chatgpt.py +7 -2
  39. webscout/Provider/OPENAI/chatgptclone.py +4 -7
  40. webscout/Provider/OPENAI/chatsandbox.py +84 -59
  41. webscout/Provider/OPENAI/deepinfra.py +6 -6
  42. webscout/Provider/OPENAI/heckai.py +4 -1
  43. webscout/Provider/OPENAI/netwrck.py +1 -0
  44. webscout/Provider/OPENAI/scirachat.py +6 -0
  45. webscout/Provider/OPENAI/textpollinations.py +3 -11
  46. webscout/Provider/OPENAI/toolbaz.py +14 -11
  47. webscout/Provider/OpenGPT.py +1 -1
  48. webscout/Provider/Openai.py +150 -402
  49. webscout/Provider/PI.py +1 -0
  50. webscout/Provider/Perplexitylabs.py +1 -2
  51. webscout/Provider/QwenLM.py +107 -89
  52. webscout/Provider/STT/__init__.py +17 -2
  53. webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
  54. webscout/Provider/StandardInput.py +1 -1
  55. webscout/Provider/TTI/__init__.py +18 -12
  56. webscout/Provider/TTS/__init__.py +18 -10
  57. webscout/Provider/TeachAnything.py +1 -0
  58. webscout/Provider/TextPollinationsAI.py +5 -12
  59. webscout/Provider/TogetherAI.py +86 -87
  60. webscout/Provider/TwoAI.py +53 -309
  61. webscout/Provider/TypliAI.py +2 -1
  62. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
  63. webscout/Provider/Venice.py +2 -1
  64. webscout/Provider/VercelAI.py +1 -0
  65. webscout/Provider/WiseCat.py +2 -1
  66. webscout/Provider/WrDoChat.py +2 -1
  67. webscout/Provider/__init__.py +18 -86
  68. webscout/Provider/ai4chat.py +1 -1
  69. webscout/Provider/akashgpt.py +7 -10
  70. webscout/Provider/cerebras.py +115 -9
  71. webscout/Provider/chatglm.py +170 -83
  72. webscout/Provider/cleeai.py +1 -2
  73. webscout/Provider/deepseek_assistant.py +1 -1
  74. webscout/Provider/elmo.py +1 -1
  75. webscout/Provider/geminiapi.py +1 -1
  76. webscout/Provider/granite.py +1 -1
  77. webscout/Provider/hermes.py +1 -3
  78. webscout/Provider/julius.py +1 -0
  79. webscout/Provider/learnfastai.py +1 -1
  80. webscout/Provider/llama3mitril.py +1 -1
  81. webscout/Provider/llmchat.py +1 -1
  82. webscout/Provider/llmchatco.py +1 -1
  83. webscout/Provider/meta.py +3 -3
  84. webscout/Provider/oivscode.py +2 -2
  85. webscout/Provider/scira_chat.py +51 -124
  86. webscout/Provider/searchchat.py +1 -0
  87. webscout/Provider/sonus.py +1 -1
  88. webscout/Provider/toolbaz.py +15 -12
  89. webscout/Provider/turboseek.py +31 -22
  90. webscout/Provider/typefully.py +2 -1
  91. webscout/Provider/x0gpt.py +1 -0
  92. webscout/Provider/yep.py +2 -1
  93. webscout/tempid.py +6 -0
  94. webscout/version.py +1 -1
  95. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/METADATA +2 -1
  96. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/RECORD +103 -129
  97. webscout/Provider/AllenAI.py +0 -440
  98. webscout/Provider/Blackboxai.py +0 -793
  99. webscout/Provider/FreeGemini.py +0 -250
  100. webscout/Provider/GptOss.py +0 -207
  101. webscout/Provider/Hunyuan.py +0 -283
  102. webscout/Provider/Kimi.py +0 -445
  103. webscout/Provider/MCPCore.py +0 -322
  104. webscout/Provider/MiniMax.py +0 -207
  105. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
  106. webscout/Provider/OPENAI/MiniMax.py +0 -298
  107. webscout/Provider/OPENAI/autoproxy.py +0 -1067
  108. webscout/Provider/OPENAI/copilot.py +0 -321
  109. webscout/Provider/OPENAI/gptoss.py +0 -288
  110. webscout/Provider/OPENAI/kimi.py +0 -469
  111. webscout/Provider/OPENAI/mcpcore.py +0 -431
  112. webscout/Provider/OPENAI/multichat.py +0 -378
  113. webscout/Provider/Reka.py +0 -214
  114. webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
  115. webscout/Provider/asksteve.py +0 -220
  116. webscout/Provider/copilot.py +0 -441
  117. webscout/Provider/freeaichat.py +0 -294
  118. webscout/Provider/koala.py +0 -182
  119. webscout/Provider/lmarena.py +0 -198
  120. webscout/Provider/monochat.py +0 -275
  121. webscout/Provider/multichat.py +0 -375
  122. webscout/Provider/scnet.py +0 -244
  123. webscout/Provider/talkai.py +0 -194
  124. /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
  125. /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
  126. /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
  127. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
  128. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
  129. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
  130. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
webscout/Provider/QwenLM.py

@@ -1,7 +1,9 @@
 import json
 from typing import Union, Any, Dict, Generator, Optional
+import uuid
+import time
 
-import cloudscraper
+from curl_cffi import Session
 
 from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider
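The import swap above is the heart of this file's rewrite: cloudscraper solved Cloudflare challenges after the fact, while curl_cffi impersonates a real browser's TLS fingerprint at the transport layer. A minimal sketch of the new HTTP stack, assuming curl_cffi is installed (the endpoint URL is the one this diff switches to below):

# Minimal sketch of the session the rewritten provider builds on.
from curl_cffi import Session

session = Session(impersonate="chrome")  # spoof Chrome's TLS/JA3 fingerprint
resp = session.get("https://chat.qwen.ai/")  # endpoint as updated in this diff
print(resp.status_code)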
@@ -11,17 +13,24 @@ class QwenLM(Provider):
     """
     A class to interact with the QwenLM API
     """
-
+    required_auth = True
     AVAILABLE_MODELS = [
+        "qwen-plus-2025-09-11",
+        "qwen3-max-preview",
+        "qwen3-235b-a22b",
+        "qwen3-coder-plus",
+        "qwen3-30b-a3b",
+        "qwen3-coder-30b-a3b-instruct",
         "qwen-max-latest",
-        "qwen-plus-latest",
-        "qwen2.5-14b-instruct-1m",
+        "qwen-plus-2025-01-25",
         "qwq-32b",
+        "qwen-turbo-2025-02-11",
+        "qwen2.5-omni-7b",
+        "qvq-72b-preview-0310",
+        "qwen2.5-vl-32b-instruct",
+        "qwen2.5-14b-instruct-1m",
         "qwen2.5-coder-32b-instruct",
-        "qwen-turbo-latest",
-        "qwen2.5-72b-instruct",
-        "qwen2.5-vl-72b-instruct",
-        "qvq-72b-preview"
+        "qwen2.5-72b-instruct"
     ]
 
     def __init__(
@@ -36,7 +45,7 @@ class QwenLM(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: Optional[str] = None,
-        model: str = "qwen-plus-latest",
+        model: str = "qwen-plus-2025-09-11",
         system_prompt: str = "You are a helpful AI assistant."
     ):
         """Initializes the QwenLM API client."""
@@ -45,36 +54,38 @@ class QwenLM(Provider):
                 f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}"
             )
 
-        self.session = cloudscraper.create_scraper()
+        self.session = Session(impersonate="chrome")
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
-        self.api_endpoint = "https://chat.qwenlm.ai/api/chat/completions"
+        self.api_endpoint = "https://chat.qwen.ai/api/chat/completions"
         self.stream_chunk_size = 64
         self.timeout = timeout
         self.last_response = {}
         self.model = model
         self.system_prompt = system_prompt
         self.cookies_path = cookies_path
-        self.cookie_string, self.token = self._load_cookies()
+        self.cookies_dict, self.token = self._load_cookies()
+        self.chat_id = str(uuid.uuid4())
 
         self.headers = {
-            "accept": "*/*",
-            "accept-language": "en-US,en;q=0.9",
-            "content-type": "application/json",
-            "origin": "https://chat.qwenlm.ai",
-            "referer": "https://chat.qwenlm.ai/",
-            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
+            "Accept": "*/*",
+            "Accept-Language": "en-US,en;q=0.9",
+            "Cache-Control": "no-cache",
+            "Connection": "keep-alive",
+            "DNT": "1",
+            "Origin": "https://chat.qwen.ai",
+            "Pragma": "no-cache",
+            "Referer": f"https://chat.qwen.ai/c/{self.chat_id}",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36",
             "authorization": f"Bearer {self.token}" if self.token else '',
         }
         self.session.headers.update(self.headers)
+        self.session.cookies.update(self.cookies_dict)
         self.session.proxies = proxies
         self.chat_type = "t2t" # search - used WEB, t2t - chatbot, t2i - image_gen
-        if self.chat_type != "t2t":
-            AVAILABLE_MODELS = [
-                'qwen-plus-latest', 'qvq-72b-preview',
-                'qvq-32b', 'qwen-turbo-latest',
-                'qwen-max-latest'
-            ]
 
         self.__available_optimizers = (
             method
@@ -94,19 +105,14 @@ class QwenLM(Provider):
         )
         self.conversation.history_offset = history_offset
 
-    def _load_cookies(self) -> tuple[str, str]:
-        """Load cookies from a JSON file and build a cookie header string."""
+    def _load_cookies(self) -> tuple[dict, str]:
+        """Load cookies from a JSON file and build a cookie dict."""
         try:
             with open(self.cookies_path, "r") as f:
                 cookies = json.load(f)
-            cookie_string = "; ".join(
-                f"{cookie['name']}={cookie['value']}" for cookie in cookies
-            )
-            token = next(
-                (cookie.get("value") for cookie in cookies if cookie.get("name") == "token"),
-                "",
-            )
-            return cookie_string, token
+            cookies_dict = {cookie['name']: cookie['value'] for cookie in cookies}
+            token = cookies_dict.get("token", "")
+            return cookies_dict, token
         except FileNotFoundError:
             raise exceptions.InvalidAuthenticationError(
                 "Error: cookies.json file not found!"
@@ -138,14 +144,22 @@ class QwenLM(Provider):
         )
 
         payload = {
-            'chat_type': self.chat_type,
+            'stream': stream,
+            'incremental_output': False,
+            "chat_type": "t2t",
+            "model": self.model,
             "messages": [
-                {"role": "system", "content": self.system_prompt},
-                {"role": "user", "content": conversation_prompt}
+                {
+                    "role": "user",
+                    "content": conversation_prompt,
+                    "chat_type": "t2t",
+                    "extra": {},
+                    "feature_config": {"thinking_enabled": False},
+                }
             ],
-            "model": self.model,
-            "stream": stream,
-            "max_tokens": self.max_tokens_to_sample
+            "session_id": str(uuid.uuid4()),
+            "chat_id": str(uuid.uuid4()),
+            "id": str(uuid.uuid4()),
         }
 
         def for_stream() -> Generator[Dict[str, Any], None, None]:
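For reference, the rebuilt payload serializes to a request body like the following sketch (placeholder prompt; the three UUIDs are generated fresh per request, exactly as in the hunk above). Note that the system prompt no longer travels in the messages list:

# Illustrative request body produced by the new payload dict.
import json
import uuid

payload = {
    "stream": True,
    "incremental_output": False,
    "chat_type": "t2t",
    "model": "qwen-plus-2025-09-11",
    "messages": [{
        "role": "user",
        "content": "hi",  # placeholder prompt
        "chat_type": "t2t",
        "extra": {},
        "feature_config": {"thinking_enabled": False},
    }],
    "session_id": str(uuid.uuid4()),
    "chat_id": str(uuid.uuid4()),
    "id": str(uuid.uuid4()),
}
print(json.dumps(payload, indent=2))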
@@ -158,34 +172,29 @@ class QwenLM(Provider):
             )
 
             cumulative_text = ""
-            for line in response.iter_lines(decode_unicode=True):
-                if line and line.startswith("data: "):
-                    data = line[6:]
-                    if data == "[DONE]":
-                        break
-                    try:
-                        json_data = json.loads(data)
-                        # Handle multiple response formats
-                        if "choices" in json_data:
-                            new_content = json_data.get("choices")[0].get("delta", {}).get("content", "")
-                        elif "messages" in json_data:
-                            assistant_msg = next(
-                                (msg for msg in reversed(json_data["messages"]) if msg.get("role") == "assistant"),
-                                {}
-                            )
-                            content_field = assistant_msg.get("content", "")
-                            if isinstance(content_field, list):
-                                new_content = "".join(item.get("text", "") for item in content_field)
-                            else:
-                                new_content = content_field
-                        else:
-                            new_content = ""
-                        delta = new_content[len(cumulative_text):]
-                        cumulative_text = new_content
-                        if delta:
-                            yield delta if raw else {"text": delta}
-                    except json.JSONDecodeError:
-                        continue
+            for line in response.iter_lines(decode_unicode=False):
+                if line:
+                    line = line.decode('utf-8') if isinstance(line, bytes) else line
+                    if line.startswith("data: "):
+                        data = line[6:]
+                        if data == "[DONE]":
+                            break
+                        try:
+                            json_data = json.loads(data)
+                            if "response.created" in json_data:
+                                # Initial response, can ignore or use for chat_id etc.
+                                continue
+                            if "choices" in json_data:
+                                delta = json_data["choices"][0]["delta"]
+                                new_content = delta.get("content", "")
+                                status = delta.get("status", "")
+                                if status == "finished":
+                                    break
+                                cumulative_text += new_content
+                                if new_content:
+                                    yield delta if raw else {"text": new_content}
+                        except json.JSONDecodeError:
+                            continue
             self.last_response.update(dict(text=cumulative_text))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
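The rewritten parser assumes the newer SSE wire format: an initial response.created frame to skip, then choices[0].delta chunks whose status field signals completion. A self-contained sketch of that logic against hypothetical frames (illustrative shapes, not captured traffic):

# Hypothetical SSE frames in the shape the new for_stream() parser expects.
import json

frames = [
    'data: {"response.created": {"chat_id": "abc"}}',                       # ignored
    'data: {"choices": [{"delta": {"content": "Hel"}}]}',
    'data: {"choices": [{"delta": {"content": "lo!"}}]}',
    'data: {"choices": [{"delta": {"content": "", "status": "finished"}}]}',
    'data: [DONE]',
]

text = ""
for line in frames:
    data = line[len("data: "):]
    if data == "[DONE]":
        break
    obj = json.loads(data)
    if "response.created" in obj:  # initial bookkeeping frame
        continue
    delta = obj["choices"][0]["delta"]
    if delta.get("status") == "finished":
        break
    text += delta.get("content", "")
print(text)  # -> Hello!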
@@ -193,29 +202,35 @@ class QwenLM(Provider):
 
         def for_non_stream() -> Dict[str, Any]:
             """
-            Handles non-streaming responses by aggregating all streamed chunks into a single string.
+            Handles non-streaming responses by making a non-streaming request.
             """
 
-            # Initialize an empty string to accumulate the full response
-            full_response = ""
+            # Create a non-streaming payload
+            non_stream_payload = payload.copy()
+            non_stream_payload['stream'] = False
+            non_stream_payload['incremental_output'] = False
 
-            # Iterate through the stream generator and accumulate the text
-            try:
-                for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
-                    if isinstance(response, dict): # Check if the response is a dictionary
-                        full_response += response.get("text", "") # Extract and append the "text" field
-                    elif isinstance(response, str): # If the response is a string, directly append it
-                        full_response += response
-            except Exception as e:
-                raise
+            response = self.session.post(
+                self.api_endpoint, json=non_stream_payload, headers=self.headers, stream=False, timeout=self.timeout
+            )
+            if not response.ok:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
 
-            # Ensure last_response is updated with the aggregated text
-            self.last_response.update({"text": full_response})
+            result = response.json()
+            assistant_reply = (
+                result.get("choices", [{}])[0]
+                .get("message", {})
+                .get("content", "")
+            )
 
-            # Update conversation history with the final response
-            self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
+            self.last_response.update({"text": assistant_reply})
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
 
-            return {"text": full_response} # Return the dictionary containing the full response
+            return {"text": assistant_reply}
 
         return for_stream() if stream else for_non_stream()
 
@@ -246,9 +261,12 @@ class QwenLM(Provider):
 
 if __name__ == "__main__":
     from rich import print
-    ai = QwenLM(cookies_path="cookies.json")
-    response = ai.chat(input(">>> "), stream=False)
-    ai.chat_type = "search" # search - used WEB, t2t - chatbot, t2i - image_gen
-    print(response)
+    cookies_path = r"C:\Users\koula\Desktop\Webscout\cookies.json"
+    for model in QwenLM.AVAILABLE_MODELS:
+        ai = QwenLM(cookies_path=cookies_path, model=model)
+        response = ai.chat("hi")
+        print(f"Model: {model}")
+        print(response)
+        print("-" * 50)
     # for chunk in response:
     #     print(chunk, end="", flush=True)
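The commented-out loop above is the intended streaming consumption pattern. A hedged usage sketch (the cookies path is a placeholder, and chat(..., stream=True) is assumed to yield text chunks, as the old signature and comment suggest):

# Usage sketch: print chunks as they arrive, mirroring the commented-out loop.
ai = QwenLM(cookies_path="cookies.json", model="qwen-plus-2025-09-11")
for chunk in ai.chat("hi", stream=True):
    print(chunk, end="", flush=True)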
webscout/Provider/STT/__init__.py

@@ -1,3 +1,18 @@
 # This file marks the directory as a Python package.
-from .base import *
-from .elevenlabs import *
+
+import os
+import importlib
+from pathlib import Path
+
+# Get current directory
+current_dir = Path(__file__).parent
+
+# Auto-import all .py files (except __init__.py)
+for file_path in current_dir.glob("*.py"):
+    if file_path.name != "__init__.py":
+        module_name = file_path.stem
+        try:
+            module = importlib.import_module(f".{module_name}", package=__name__)
+            globals().update(vars(module))
+        except ImportError:
+            pass  # Skip files that can't be imported
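This glob-based auto-import replaces the explicit star-imports in the STT package above, and the identical block recurs in the TTI and TTS packages further down. A functionally similar sketch using pkgutil, shown only as a comparison point (a common idiom, not what the wheel ships); note that both variants swallow ImportError, so a provider that fails to import silently vanishes from the package namespace:

# Alternative sketch: enumerate submodules with pkgutil instead of globbing paths.
# Only meaningful inside a package __init__.py, where __path__ is defined.
import importlib
import pkgutil

for info in pkgutil.iter_modules(__path__):
    if info.name != "__init__":
        try:
            module = importlib.import_module(f".{info.name}", package=__name__)
            globals().update(vars(module))
        except ImportError:
            pass  # same silent-skip behaviour as the shipped code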
webscout/Provider/{Llama3.py → Sambanova.py}

@@ -13,19 +13,18 @@ class Sambanova(Provider):
     """
     A class to interact with the Sambanova API.
     """
-
+    required_auth = True
     AVAILABLE_MODELS = [
-        "Meta-Llama-3.1-8B-Instruct",
-        "Meta-Llama-3.1-70B-Instruct",
-        "Meta-Llama-3.1-405B-Instruct",
+        "DeepSeek-R1-0528",
         "DeepSeek-R1-Distill-Llama-70B",
-        "Llama-3.1-Tulu-3-405B",
-        "Meta-Llama-3.2-1B-Instruct",
-        "Meta-Llama-3.2-3B-Instruct",
+        "DeepSeek-V3.1",
+        "gpt-oss-120b",
+        "Qwen3-32B",
+        "DeepSeek-V3-0324",
+        "Meta-Llama-3.1-8B-Instruct",
         "Meta-Llama-3.3-70B-Instruct",
-        "Qwen2.5-72B-Instruct",
-        "Qwen2.5-Coder-32B-Instruct",
-        "QwQ-32B-Preview"
+        "Llama-3.3-Swallow-70B-Instruct-v0.4",
+        "Llama-4-Maverick-17B-128E-Instruct"
     ]
 
     def __init__(
webscout/Provider/StandardInput.py

@@ -13,7 +13,7 @@ class StandardInputAI(Provider):
     """
     A class to interact with the Standard Input chat API.
     """
-
+    required_auth = False
     AVAILABLE_MODELS = {
         "standard-quick": "quick",
         "standard-reasoning": "quick", # Same model but with reasoning enabled
webscout/Provider/TTI/__init__.py

@@ -1,12 +1,18 @@
-from .pollinations import *
-from .piclumen import *
-from .magicstudio import *
-from .pixelmuse import *
-from .aiarta import *
-from .gpt1image import *
-from .imagen import *
-from .together import *
-from .bing import *
-from .infip import *
-from .monochat import *
-from .venice import *
+# This file marks the directory as a Python package.
+
+import os
+import importlib
+from pathlib import Path
+
+# Get current directory
+current_dir = Path(__file__).parent
+
+# Auto-import all .py files (except __init__.py)
+for file_path in current_dir.glob("*.py"):
+    if file_path.name != "__init__.py":
+        module_name = file_path.stem
+        try:
+            module = importlib.import_module(f".{module_name}", package=__name__)
+            globals().update(vars(module))
+        except ImportError:
+            pass  # Skip files that can't be imported
webscout/Provider/TTS/__init__.py

@@ -1,10 +1,18 @@
-from .base import BaseTTSProvider, AsyncBaseTTSProvider
-from .streamElements import *
-from .parler import *
-from .deepgram import *
-from .elevenlabs import *
-from .murfai import *
-from .gesserit import *
-from .speechma import *
-from .openai_fm import *
-from .freetts import *
+# This file marks the directory as a Python package.
+
+import os
+import importlib
+from pathlib import Path
+
+# Get current directory
+current_dir = Path(__file__).parent
+
+# Auto-import all .py files (except __init__.py)
+for file_path in current_dir.glob("*.py"):
+    if file_path.name != "__init__.py":
+        module_name = file_path.stem
+        try:
+            module = importlib.import_module(f".{module_name}", package=__name__)
+            globals().update(vars(module))
+        except ImportError:
+            pass  # Skip files that can't be imported
webscout/Provider/TeachAnything.py

@@ -15,6 +15,7 @@ class TeachAnything(Provider):
     """
     # Add AVAILABLE_MODELS if applicable, otherwise remove model param
     # AVAILABLE_MODELS = ["default"] # Example
+    required_auth = False
 
     def __init__(
         self,
webscout/Provider/TextPollinationsAI.py

@@ -14,30 +14,23 @@ class TextPollinationsAI(Provider):
     A class to interact with the Pollinations AI API.
     """
 
+    required_auth = False
     AVAILABLE_MODELS = [
         "deepseek-reasoning",
-        "glm",
-        "gpt-5-nano",
-        "llama-fast-roblox",
-        "llama-roblox",
-        "llamascout",
+        "gemini",
         "mistral",
-        "mistral-nemo-roblox",
-        "mistral-roblox",
         "nova-fast",
         "openai",
         "openai-audio",
         "openai-fast",
-        "openai-large",
-        "openai-roblox",
+        "openai-reasoning",
         "qwen-coder",
+        "roblox-rp",
         "bidara",
         "evil",
-        "hypnosis-tracy",
         "midijourney",
         "mirexa",
         "rtist",
-        "sur",
         "unity",
     ]
     _models_url = "https://text.pollinations.ai/models"
@@ -315,4 +308,4 @@ if __name__ == "__main__":
                 # print(f"\r{model:<50} {'✗ (Non-Stream)':<10} Empty non-stream response")
 
             except Exception as e:
-                print(f"\r{model:<50} {'✗':<10} {str(e)}")
+                print(f"\r{model:<50} {'✗':<10} {str(e)}")