webscout 8.3.6__py3-none-any.whl → 8.3.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIutel.py +2 -0
- webscout/Provider/AISEARCH/__init__.py +18 -11
- webscout/Provider/AISEARCH/scira_search.py +3 -1
- webscout/Provider/Aitopia.py +2 -3
- webscout/Provider/Andi.py +3 -3
- webscout/Provider/ChatGPTClone.py +1 -1
- webscout/Provider/ChatSandbox.py +1 -0
- webscout/Provider/Cloudflare.py +1 -1
- webscout/Provider/Cohere.py +1 -0
- webscout/Provider/Deepinfra.py +7 -10
- webscout/Provider/ExaAI.py +1 -1
- webscout/Provider/ExaChat.py +1 -80
- webscout/Provider/Flowith.py +1 -1
- webscout/Provider/Gemini.py +7 -5
- webscout/Provider/GeminiProxy.py +1 -0
- webscout/Provider/GithubChat.py +3 -1
- webscout/Provider/Groq.py +1 -1
- webscout/Provider/HeckAI.py +8 -4
- webscout/Provider/Jadve.py +23 -38
- webscout/Provider/K2Think.py +308 -0
- webscout/Provider/Koboldai.py +8 -186
- webscout/Provider/LambdaChat.py +2 -4
- webscout/Provider/Nemotron.py +3 -4
- webscout/Provider/Netwrck.py +3 -2
- webscout/Provider/OLLAMA.py +1 -0
- webscout/Provider/OPENAI/Cloudflare.py +6 -7
- webscout/Provider/OPENAI/FalconH1.py +2 -7
- webscout/Provider/OPENAI/FreeGemini.py +6 -8
- webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
- webscout/Provider/OPENAI/NEMOTRON.py +3 -6
- webscout/Provider/OPENAI/PI.py +5 -4
- webscout/Provider/OPENAI/Qwen3.py +2 -3
- webscout/Provider/OPENAI/TogetherAI.py +2 -2
- webscout/Provider/OPENAI/TwoAI.py +3 -4
- webscout/Provider/OPENAI/__init__.py +17 -58
- webscout/Provider/OPENAI/ai4chat.py +313 -303
- webscout/Provider/OPENAI/base.py +9 -29
- webscout/Provider/OPENAI/chatgpt.py +7 -2
- webscout/Provider/OPENAI/chatgptclone.py +4 -7
- webscout/Provider/OPENAI/chatsandbox.py +84 -59
- webscout/Provider/OPENAI/deepinfra.py +6 -6
- webscout/Provider/OPENAI/heckai.py +4 -1
- webscout/Provider/OPENAI/netwrck.py +1 -0
- webscout/Provider/OPENAI/scirachat.py +6 -0
- webscout/Provider/OPENAI/textpollinations.py +3 -11
- webscout/Provider/OPENAI/toolbaz.py +14 -11
- webscout/Provider/OpenGPT.py +1 -1
- webscout/Provider/Openai.py +150 -402
- webscout/Provider/PI.py +1 -0
- webscout/Provider/Perplexitylabs.py +1 -2
- webscout/Provider/QwenLM.py +107 -89
- webscout/Provider/STT/__init__.py +17 -2
- webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
- webscout/Provider/StandardInput.py +1 -1
- webscout/Provider/TTI/__init__.py +18 -12
- webscout/Provider/TTS/__init__.py +18 -10
- webscout/Provider/TeachAnything.py +1 -0
- webscout/Provider/TextPollinationsAI.py +5 -12
- webscout/Provider/TogetherAI.py +86 -87
- webscout/Provider/TwoAI.py +53 -309
- webscout/Provider/TypliAI.py +2 -1
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
- webscout/Provider/Venice.py +2 -1
- webscout/Provider/VercelAI.py +1 -0
- webscout/Provider/WiseCat.py +2 -1
- webscout/Provider/WrDoChat.py +2 -1
- webscout/Provider/__init__.py +18 -86
- webscout/Provider/ai4chat.py +1 -1
- webscout/Provider/akashgpt.py +7 -10
- webscout/Provider/cerebras.py +115 -9
- webscout/Provider/chatglm.py +170 -83
- webscout/Provider/cleeai.py +1 -2
- webscout/Provider/deepseek_assistant.py +1 -1
- webscout/Provider/elmo.py +1 -1
- webscout/Provider/geminiapi.py +1 -1
- webscout/Provider/granite.py +1 -1
- webscout/Provider/hermes.py +1 -3
- webscout/Provider/julius.py +1 -0
- webscout/Provider/learnfastai.py +1 -1
- webscout/Provider/llama3mitril.py +1 -1
- webscout/Provider/llmchat.py +1 -1
- webscout/Provider/llmchatco.py +1 -1
- webscout/Provider/meta.py +3 -3
- webscout/Provider/oivscode.py +2 -2
- webscout/Provider/scira_chat.py +51 -124
- webscout/Provider/searchchat.py +1 -0
- webscout/Provider/sonus.py +1 -1
- webscout/Provider/toolbaz.py +15 -12
- webscout/Provider/turboseek.py +31 -22
- webscout/Provider/typefully.py +2 -1
- webscout/Provider/x0gpt.py +1 -0
- webscout/Provider/yep.py +2 -1
- webscout/tempid.py +6 -0
- webscout/version.py +1 -1
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/METADATA +2 -1
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/RECORD +103 -129
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -793
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/GptOss.py +0 -207
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/Kimi.py +0 -445
- webscout/Provider/MCPCore.py +0 -322
- webscout/Provider/MiniMax.py +0 -207
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
- webscout/Provider/OPENAI/MiniMax.py +0 -298
- webscout/Provider/OPENAI/autoproxy.py +0 -1067
- webscout/Provider/OPENAI/copilot.py +0 -321
- webscout/Provider/OPENAI/gptoss.py +0 -288
- webscout/Provider/OPENAI/kimi.py +0 -469
- webscout/Provider/OPENAI/mcpcore.py +0 -431
- webscout/Provider/OPENAI/multichat.py +0 -378
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/copilot.py +0 -441
- webscout/Provider/freeaichat.py +0 -294
- webscout/Provider/koala.py +0 -182
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/monochat.py +0 -275
- webscout/Provider/multichat.py +0 -375
- webscout/Provider/scnet.py +0 -244
- webscout/Provider/talkai.py +0 -194
- /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
- /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
- /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
webscout/Provider/QwenLM.py
CHANGED
@@ -1,7 +1,9 @@
 import json
 from typing import Union, Any, Dict, Generator, Optional
+import uuid
+import time

-import
+from curl_cffi import Session

 from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider
@@ -11,17 +13,24 @@ class QwenLM(Provider):
     """
     A class to interact with the QwenLM API
     """
-
+    required_auth = True
     AVAILABLE_MODELS = [
+        "qwen-plus-2025-09-11",
+        "qwen3-max-preview",
+        "qwen3-235b-a22b",
+        "qwen3-coder-plus",
+        "qwen3-30b-a3b",
+        "qwen3-coder-30b-a3b-instruct",
         "qwen-max-latest",
-        "qwen-plus-
-        "qwen2.5-14b-instruct-1m",
+        "qwen-plus-2025-01-25",
         "qwq-32b",
+        "qwen-turbo-2025-02-11",
+        "qwen2.5-omni-7b",
+        "qvq-72b-preview-0310",
+        "qwen2.5-vl-32b-instruct",
+        "qwen2.5-14b-instruct-1m",
         "qwen2.5-coder-32b-instruct",
-        "
-        "qwen2.5-72b-instruct",
-        "qwen2.5-vl-72b-instruct",
-        "qvq-72b-preview"
+        "qwen2.5-72b-instruct"
     ]

     def __init__(
@@ -36,7 +45,7 @@ class QwenLM(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: Optional[str] = None,
-        model: str = "qwen-plus-
+        model: str = "qwen-plus-2025-09-11",
         system_prompt: str = "You are a helpful AI assistant."
     ):
         """Initializes the QwenLM API client."""
@@ -45,36 +54,38 @@ class QwenLM(Provider):
                 f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}"
             )

-        self.session =
+        self.session = Session(impersonate="chrome")
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
-        self.api_endpoint = "https://chat.
+        self.api_endpoint = "https://chat.qwen.ai/api/chat/completions"
         self.stream_chunk_size = 64
         self.timeout = timeout
         self.last_response = {}
         self.model = model
         self.system_prompt = system_prompt
         self.cookies_path = cookies_path
-        self.
+        self.cookies_dict, self.token = self._load_cookies()
+        self.chat_id = str(uuid.uuid4())

         self.headers = {
-            "
-            "
-            "
-            "
-            "
-            "
+            "Accept": "*/*",
+            "Accept-Language": "en-US,en;q=0.9",
+            "Cache-Control": "no-cache",
+            "Connection": "keep-alive",
+            "DNT": "1",
+            "Origin": "https://chat.qwen.ai",
+            "Pragma": "no-cache",
+            "Referer": f"https://chat.qwen.ai/c/{self.chat_id}",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36",
             "authorization": f"Bearer {self.token}" if self.token else '',
         }
         self.session.headers.update(self.headers)
+        self.session.cookies.update(self.cookies_dict)
         self.session.proxies = proxies
         self.chat_type = "t2t" # search - used WEB, t2t - chatbot, t2i - image_gen
-        if self.chat_type != "t2t":
-            AVAILABLE_MODELS = [
-                'qwen-plus-latest', 'qvq-72b-preview',
-                'qvq-32b', 'qwen-turbo-latest',
-                'qwen-max-latest'
-            ]

         self.__available_optimizers = (
             method
@@ -94,19 +105,14 @@ class QwenLM(Provider):
         )
         self.conversation.history_offset = history_offset

-    def _load_cookies(self) -> tuple[
-        """Load cookies from a JSON file and build a cookie
+    def _load_cookies(self) -> tuple[dict, str]:
+        """Load cookies from a JSON file and build a cookie dict."""
         try:
             with open(self.cookies_path, "r") as f:
                 cookies = json.load(f)
-
-
-
-            token = next(
-                (cookie.get("value") for cookie in cookies if cookie.get("name") == "token"),
-                "",
-            )
-            return cookie_string, token
+            cookies_dict = {cookie['name']: cookie['value'] for cookie in cookies}
+            token = cookies_dict.get("token", "")
+            return cookies_dict, token
         except FileNotFoundError:
             raise exceptions.InvalidAuthenticationError(
                 "Error: cookies.json file not found!"
@@ -138,14 +144,22 @@ class QwenLM(Provider):
             )

         payload = {
-            '
+            'stream': stream,
+            'incremental_output': False,
+            "chat_type": "t2t",
+            "model": self.model,
             "messages": [
-                {
-
+                {
+                    "role": "user",
+                    "content": conversation_prompt,
+                    "chat_type": "t2t",
+                    "extra": {},
+                    "feature_config": {"thinking_enabled": False},
+                }
             ],
-            "
-            "
-            "
+            "session_id": str(uuid.uuid4()),
+            "chat_id": str(uuid.uuid4()),
+            "id": str(uuid.uuid4()),
         }

         def for_stream() -> Generator[Dict[str, Any], None, None]:
@@ -158,34 +172,29 @@ class QwenLM(Provider):
             )

             cumulative_text = ""
-            for line in response.iter_lines(decode_unicode=
-                if line
-
-                    if data
-
-
-
-
-
-
-
-
-
-
-
-
-
-                            new_content
-
-
-
-                        cumulative_text = new_content
-                        if delta:
-                            yield delta if raw else {"text": delta}
-                    except json.JSONDecodeError:
-                        continue
+            for line in response.iter_lines(decode_unicode=False):
+                if line:
+                    line = line.decode('utf-8') if isinstance(line, bytes) else line
+                    if line.startswith("data: "):
+                        data = line[6:]
+                        if data == "[DONE]":
+                            break
+                        try:
+                            json_data = json.loads(data)
+                            if "response.created" in json_data:
+                                # Initial response, can ignore or use for chat_id etc.
+                                continue
+                            if "choices" in json_data:
+                                delta = json_data["choices"][0]["delta"]
+                                new_content = delta.get("content", "")
+                                status = delta.get("status", "")
+                                if status == "finished":
+                                    break
+                                cumulative_text += new_content
+                                if new_content:
+                                    yield delta if raw else {"text": new_content}
+                        except json.JSONDecodeError:
+                            continue
             self.last_response.update(dict(text=cumulative_text))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
@@ -193,29 +202,35 @@ class QwenLM(Provider):

         def for_non_stream() -> Dict[str, Any]:
             """
-            Handles non-streaming responses by
+            Handles non-streaming responses by making a non-streaming request.
             """

-            #
-
+            # Create a non-streaming payload
+            non_stream_payload = payload.copy()
+            non_stream_payload['stream'] = False
+            non_stream_payload['incremental_output'] = False

-
-
-
-
-
-
-
-            except Exception as e:
-                raise
+            response = self.session.post(
+                self.api_endpoint, json=non_stream_payload, headers=self.headers, stream=False, timeout=self.timeout
+            )
+            if not response.ok:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )

-
-
+            result = response.json()
+            assistant_reply = (
+                result.get("choices", [{}])[0]
+                .get("message", {})
+                .get("content", "")
+            )

-
-            self.conversation.update_chat_history(
+            self.last_response.update({"text": assistant_reply})
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )

-            return {"text":
+            return {"text": assistant_reply}

         return for_stream() if stream else for_non_stream()
@@ -246,9 +261,12 @@ class QwenLM(Provider):

 if __name__ == "__main__":
     from rich import print
-
-
-
-
+    cookies_path = r"C:\Users\koula\Desktop\Webscout\cookies.json"
+    for model in QwenLM.AVAILABLE_MODELS:
+        ai = QwenLM(cookies_path=cookies_path, model=model)
+        response = ai.chat("hi")
+        print(f"Model: {model}")
+        print(response)
+        print("-" * 50)
     # for chunk in response:
     # print(chunk, end="", flush=True)
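Note on the new cookie handling: the rewritten `_load_cookies` expects `cookies.json` to hold a browser-style export, i.e. a JSON list of objects with `name` and `value` keys, one of which must be a cookie literally named `token` carrying the bearer token. A minimal sketch of a compatible file and the dict the loader derives from it (the cookie names and values below are placeholders, not captured from chat.qwen.ai):

import json

# Hypothetical browser-export shaped cookies.json; only the "token" entry
# is strictly required for the authorization header.
cookies = [
    {"name": "token", "value": "example-bearer-token"},
    {"name": "session", "value": "example-session-cookie"},
]
with open("cookies.json", "w") as f:
    json.dump(cookies, f)

# Mirrors the loading logic in the new _load_cookies:
cookies_dict = {cookie["name"]: cookie["value"] for cookie in cookies}
token = cookies_dict.get("token", "")
assert token == "example-bearer-token"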
webscout/Provider/STT/__init__.py
CHANGED

@@ -1,3 +1,18 @@
 # This file marks the directory as a Python package.
-
-
+
+import os
+import importlib
+from pathlib import Path
+
+# Get current directory
+current_dir = Path(__file__).parent
+
+# Auto-import all .py files (except __init__.py)
+for file_path in current_dir.glob("*.py"):
+    if file_path.name != "__init__.py":
+        module_name = file_path.stem
+        try:
+            module = importlib.import_module(f".{module_name}", package=__name__)
+            globals().update(vars(module))
+        except ImportError:
+            pass  # Skip files that can't be imported
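This auto-import loop (repeated verbatim in the TTI and TTS diffs below) re-exports every top-level name of each sibling module into the package namespace and silently skips any module that fails to import. A quick way to inspect what actually got exposed, assuming webscout 8.3.7 is installed (the printed names will vary with the providers present):

import webscout.Provider.STT as stt

# Names pulled in by the globals().update(vars(module)) calls land directly
# on the package, alongside os / importlib / Path from the loader itself.
print(sorted(n for n in dir(stt) if not n.startswith("_")))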
webscout/Provider/{Llama3.py → Sambanova.py}
RENAMED

@@ -13,19 +13,18 @@ class Sambanova(Provider):
     """
     A class to interact with the Sambanova API.
     """
-
+    required_auth = True
     AVAILABLE_MODELS = [
-        "
-        "Meta-Llama-3.1-70B-Instruct",
-        "Meta-Llama-3.1-405B-Instruct",
+        "DeepSeek-R1-0528",
         "DeepSeek-R1-Distill-Llama-70B",
-        "
-        "
-        "
+        "DeepSeek-V3.1",
+        "gpt-oss-120b",
+        "Qwen3-32B",
+        "DeepSeek-V3-0324",
+        "Meta-Llama-3.1-8B-Instruct",
         "Meta-Llama-3.3-70B-Instruct",
-        "
-        "
-        "QwQ-32B-Preview"
+        "Llama-3.3-Swallow-70B-Instruct-v0.4",
+        "Llama-4-Maverick-17B-128E-Instruct"
     ]

     def __init__(
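Since the provider now declares `required_auth = True`, callers need credentials before instantiating it; the refreshed model list, however, can be checked without a client. A minimal sketch, assuming the class is importable from the renamed module path (the validation mirrors the pattern the QwenLM diff shows and is not a call confirmed by this hunk):

from webscout.Provider.Sambanova import Sambanova  # path per the Llama3.py → Sambanova.py rename

model = "DeepSeek-V3.1"
if model not in Sambanova.AVAILABLE_MODELS:
    raise ValueError(f"Invalid model: {model}. Choose from: {Sambanova.AVAILABLE_MODELS}")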
webscout/Provider/TTI/__init__.py
CHANGED

@@ -1,12 +1,18 @@
-
-
-
-
-from
-
-
-
-
-
-
-
+# This file marks the directory as a Python package.
+
+import os
+import importlib
+from pathlib import Path
+
+# Get current directory
+current_dir = Path(__file__).parent
+
+# Auto-import all .py files (except __init__.py)
+for file_path in current_dir.glob("*.py"):
+    if file_path.name != "__init__.py":
+        module_name = file_path.stem
+        try:
+            module = importlib.import_module(f".{module_name}", package=__name__)
+            globals().update(vars(module))
+        except ImportError:
+            pass  # Skip files that can't be imported
webscout/Provider/TTS/__init__.py
CHANGED

@@ -1,10 +1,18 @@
-
-
-
-
-from
-
-
-
-
-
+# This file marks the directory as a Python package.
+
+import os
+import importlib
+from pathlib import Path
+
+# Get current directory
+current_dir = Path(__file__).parent
+
+# Auto-import all .py files (except __init__.py)
+for file_path in current_dir.glob("*.py"):
+    if file_path.name != "__init__.py":
+        module_name = file_path.stem
+        try:
+            module = importlib.import_module(f".{module_name}", package=__name__)
+            globals().update(vars(module))
+        except ImportError:
+            pass  # Skip files that can't be imported
webscout/Provider/TextPollinationsAI.py
CHANGED

@@ -14,30 +14,23 @@ class TextPollinationsAI(Provider):
     A class to interact with the Pollinations AI API.
     """

+    required_auth = False
     AVAILABLE_MODELS = [
         "deepseek-reasoning",
-        "
-        "gpt-5-nano",
-        "llama-fast-roblox",
-        "llama-roblox",
-        "llamascout",
+        "gemini",
         "mistral",
-        "mistral-nemo-roblox",
-        "mistral-roblox",
         "nova-fast",
         "openai",
         "openai-audio",
         "openai-fast",
-        "openai-
-        "openai-roblox",
+        "openai-reasoning",
         "qwen-coder",
+        "roblox-rp",
         "bidara",
         "evil",
-        "hypnosis-tracy",
         "midijourney",
         "mirexa",
         "rtist",
-        "sur",
         "unity",
     ]
     _models_url = "https://text.pollinations.ai/models"

@@ -315,4 +308,4 @@ if __name__ == "__main__":
             # print(f"\r{model:<50} {'✗ (Non-Stream)':<10} Empty non-stream response")

         except Exception as e:
-            print(f"\r{model:<50} {'✗':<10} {str(e)}")
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
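The class keeps `_models_url = "https://text.pollinations.ai/models"`, so the trimmed AVAILABLE_MODELS list can be cross-checked against the live catalogue. A hedged sketch follows; the endpoint's JSON shape (a list of descriptors with a `name` field) is an assumption, not something this diff confirms:

import requests

resp = requests.get("https://text.pollinations.ai/models", timeout=30)
resp.raise_for_status()
# Assumed shape: [{"name": "...", ...}, ...]
live_models = {m["name"] for m in resp.json() if isinstance(m, dict) and "name" in m}
print(sorted(live_models))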