webscout 8.3.5__py3-none-any.whl → 8.3.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/Bard.py +12 -6
- webscout/DWEBS.py +66 -57
- webscout/Provider/{UNFINISHED → AISEARCH}/PERPLEXED_search.py +34 -74
- webscout/Provider/AISEARCH/__init__.py +1 -1
- webscout/Provider/Deepinfra.py +6 -0
- webscout/Provider/Flowith.py +6 -1
- webscout/Provider/GithubChat.py +1 -0
- webscout/Provider/GptOss.py +207 -0
- webscout/Provider/Kimi.py +445 -0
- webscout/Provider/Netwrck.py +3 -6
- webscout/Provider/OPENAI/README.md +2 -1
- webscout/Provider/OPENAI/TogetherAI.py +50 -55
- webscout/Provider/OPENAI/__init__.py +4 -2
- webscout/Provider/OPENAI/copilot.py +20 -4
- webscout/Provider/OPENAI/deepinfra.py +6 -0
- webscout/Provider/OPENAI/e2b.py +60 -8
- webscout/Provider/OPENAI/flowith.py +4 -3
- webscout/Provider/OPENAI/generate_api_key.py +48 -0
- webscout/Provider/OPENAI/gptoss.py +288 -0
- webscout/Provider/OPENAI/kimi.py +469 -0
- webscout/Provider/OPENAI/netwrck.py +8 -12
- webscout/Provider/OPENAI/refact.py +274 -0
- webscout/Provider/OPENAI/textpollinations.py +3 -6
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/TTI/bing.py +14 -2
- webscout/Provider/TTI/together.py +10 -9
- webscout/Provider/TTS/README.md +0 -1
- webscout/Provider/TTS/__init__.py +0 -1
- webscout/Provider/TTS/base.py +479 -159
- webscout/Provider/TTS/deepgram.py +409 -156
- webscout/Provider/TTS/elevenlabs.py +425 -111
- webscout/Provider/TTS/freetts.py +317 -140
- webscout/Provider/TTS/gesserit.py +192 -128
- webscout/Provider/TTS/murfai.py +248 -113
- webscout/Provider/TTS/openai_fm.py +347 -129
- webscout/Provider/TTS/speechma.py +620 -586
- webscout/Provider/TextPollinationsAI.py +3 -6
- webscout/Provider/TogetherAI.py +50 -55
- webscout/Provider/UNFINISHED/VercelAIGateway.py +339 -0
- webscout/Provider/__init__.py +2 -90
- webscout/Provider/cerebras.py +83 -33
- webscout/Provider/copilot.py +42 -23
- webscout/Provider/toolbaz.py +1 -0
- webscout/conversation.py +22 -20
- webscout/sanitize.py +14 -10
- webscout/scout/README.md +20 -23
- webscout/scout/core/crawler.py +125 -38
- webscout/scout/core/scout.py +26 -5
- webscout/version.py +1 -1
- webscout/webscout_search.py +13 -6
- webscout/webscout_search_async.py +10 -8
- webscout/yep_search.py +13 -5
- {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/METADATA +2 -1
- {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/RECORD +59 -56
- webscout/Provider/Glider.py +0 -225
- webscout/Provider/OPENAI/c4ai.py +0 -394
- webscout/Provider/OPENAI/glider.py +0 -330
- webscout/Provider/TTS/sthir.py +0 -94
- /webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/WHEEL +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/refact.py
ADDED
@@ -0,0 +1,274 @@
+from regex import R
+import requests
+import json
+import time
+import uuid
+from typing import List, Dict, Optional, Union, Generator, Any
+
+from webscout.Provider.Deepinfra import DeepInfra
+from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from webscout.Provider.OPENAI.utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage
+)
+try:
+    from .generate_api_key import generate_full_api_key
+except ImportError:
+    # Fallback: define the function inline if import fails
+    import random
+    import string
+
+    def generate_api_key_suffix(length: int = 4) -> str:
+        """Generate a random API key suffix like 'C1Z5'"""
+        chars = string.ascii_uppercase + string.digits
+        return ''.join(random.choice(chars) for _ in range(length))
+
+    def generate_full_api_key(prefix: str = "EU1CW20nX5oau42xBSgm") -> str:
+        """Generate a full API key with a random suffix"""
+        suffix = generate_api_key_suffix(4)
+        return prefix + suffix
+
+try:
+    from webscout.litagent import LitAgent
+except ImportError:
+    pass
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'Refact'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = 2049,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        payload = {
+            "model": model,
+            "messages": messages,
+            "max_tokens": max_tokens,
+            "stream": stream,
+        }
+        if temperature is not None:
+            payload["temperature"] = temperature
+        if top_p is not None:
+            payload["top_p"] = top_p
+        payload.update(kwargs)
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+        if stream:
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
+        else:
+            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        try:
+            response = self._client.session.post(
+                self._client.base_url,
+                headers=self._client.headers,
+                json=payload,
+                stream=True,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies
+            )
+            response.raise_for_status()
+            prompt_tokens = 0
+            completion_tokens = 0
+            total_tokens = 0
+            for line in response.iter_lines(decode_unicode=True):
+                if line:
+                    if line.startswith("data: "):
+                        json_str = line[6:]
+                        if json_str == "[DONE]":
+                            break
+                        try:
+                            data = json.loads(json_str)
+                            choice_data = data.get('choices', [{}])[0]
+                            delta_data = choice_data.get('delta', {})
+                            finish_reason = choice_data.get('finish_reason')
+                            usage_data = data.get('usage', {})
+                            if usage_data:
+                                prompt_tokens = usage_data.get('prompt_tokens', prompt_tokens)
+                                completion_tokens = usage_data.get('completion_tokens', completion_tokens)
+                                total_tokens = usage_data.get('total_tokens', total_tokens)
+                            if delta_data.get('content'):
+                                completion_tokens += 1
+                                total_tokens = prompt_tokens + completion_tokens
+                            delta = ChoiceDelta(
+                                content=delta_data.get('content'),
+                                role=delta_data.get('role'),
+                                tool_calls=delta_data.get('tool_calls')
+                            )
+                            choice = Choice(
+                                index=choice_data.get('index', 0),
+                                delta=delta,
+                                finish_reason=finish_reason,
+                                logprobs=choice_data.get('logprobs')
+                            )
+                            chunk = ChatCompletionChunk(
+                                id=request_id,
+                                choices=[choice],
+                                created=created_time,
+                                model=model,
+                                system_fingerprint=data.get('system_fingerprint')
+                            )
+                            chunk.usage = {
+                                "prompt_tokens": prompt_tokens,
+                                "completion_tokens": completion_tokens,
+                                "total_tokens": total_tokens,
+                                "estimated_cost": None
+                            }
+                            yield chunk
+                        except json.JSONDecodeError:
+                            continue
+            # Final chunk with finish_reason="stop"
+            delta = ChoiceDelta(content=None, role=None, tool_calls=None)
+            choice = Choice(index=0, delta=delta, finish_reason="stop", logprobs=None)
+            chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                system_fingerprint=None
+            )
+            chunk.usage = {
+                "prompt_tokens": prompt_tokens,
+                "completion_tokens": completion_tokens,
+                "total_tokens": total_tokens,
+                "estimated_cost": None
+            }
+            yield chunk
+        except Exception as e:
+            print(f"Error during Refact stream request: {e}")
+            raise IOError(f"Refact request failed: {e}") from e
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> ChatCompletion:
+        try:
+            response = self._client.session.post(
+                self._client.base_url,
+                headers=self._client.headers,
+                json=payload,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies
+            )
+            response.raise_for_status()
+            data = response.json()
+            choices_data = data.get('choices', [])
+            usage_data = data.get('usage', {})
+            choices = []
+            for choice_d in choices_data:
+                message_d = choice_d.get('message')
+                if not message_d and 'delta' in choice_d:
+                    delta = choice_d['delta']
+                    message_d = {
+                        'role': delta.get('role', 'assistant'),
+                        'content': delta.get('content', '')
+                    }
+                if not message_d:
+                    message_d = {'role': 'assistant', 'content': ''}
+                message = ChatCompletionMessage(
+                    role=message_d.get('role', 'assistant'),
+                    content=message_d.get('content', '')
+                )
+                choice = Choice(
+                    index=choice_d.get('index', 0),
+                    message=message,
+                    finish_reason=choice_d.get('finish_reason', 'stop')
+                )
+                choices.append(choice)
+            usage = CompletionUsage(
+                prompt_tokens=usage_data.get('prompt_tokens', 0),
+                completion_tokens=usage_data.get('completion_tokens', 0),
+                total_tokens=usage_data.get('total_tokens', 0)
+            )
+            completion = ChatCompletion(
+                id=request_id,
+                choices=choices,
+                created=created_time,
+                model=data.get('model', model),
+                usage=usage,
+            )
+            return completion
+        except Exception as e:
+            print(f"Error during Refact non-stream request: {e}")
+            raise IOError(f"Refact request failed: {e}") from e
+
+class Chat(BaseChat):
+    def __init__(self, client: 'Refact'):
+        self.completions = Completions(client)
+
+class Refact(OpenAICompatibleProvider):
+    AVAILABLE_MODELS = [
+        "gpt-4o",
+        "gpt-4o-mini",
+        "o4-mini",
+        "gpt-4.1",
+        "gpt-4.1-mini",
+        "gpt-4.1-nano",
+        "gpt-5",
+        "gpt-5-mini",
+        "gpt-5-nano",
+        "claude-sonnet-4",
+        "claude-opus-4",
+        "claude-opus-4.1",
+        "gemini-2.5-pro",
+        "gemini-2.5-pro-preview"
+    ]
+
+    def __init__(self, api_key: str = None, browser: str = "chrome"):
+        # Mirror DeepInfra constructor signature but use the lightweight headers from lol.py
+        self.timeout = None
+        self.base_url = "https://inference.smallcloud.ai/v1/chat/completions"
+        self.session = requests.Session()
+
+        # Use minimal headers consistent with lol.py
+        self.headers = {
+            "Content-Type": "application/json",
+            "User-Agent": "refact-lsp 0.10.19",
+        }
+
+        if api_key:
+            self.headers["Authorization"] = f"Bearer {api_key}"
+        else:
+            self.headers["Authorization"] = f"Bearer {generate_full_api_key()}"
+
+        # Try to initialize LitAgent for compatibility, but do not alter headers (keep lol.py style)
+        try:
+            _ = LitAgent()
+        except Exception:
+            pass
+
+        self.session.headers.update(self.headers)
+        self.chat = Chat(self)
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
+
+if __name__ == "__main__":
+    client = Refact()
+    response = client.chat.completions.create(
+        model="claude-opus-4.1",
+        messages=[{"role": "user", "content": "Hello, how are you?"}],
+        max_tokens=10000,
+        stream=True
+    )
+    for chunk in response:
+        if chunk.choices[0].delta.content:
+            print(chunk.choices[0].delta.content, end='', flush=True)
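For reference, a non-streaming call to this new provider would look roughly like the following. This is a minimal sketch inferred from the `__main__` block above, not documented usage; the import path is assumed from the wheel's file layout.

    from webscout.Provider.OPENAI.refact import Refact  # path assumed from the RECORD

    client = Refact()  # with no api_key, generate_full_api_key() synthesizes one
    completion = client.chat.completions.create(
        model="gpt-4o-mini",  # any entry in Refact.AVAILABLE_MODELS
        messages=[{"role": "user", "content": "Say hi in five words."}],
        stream=False,  # the non-streaming path returns a ChatCompletion
    )
    print(completion.choices[0].message.content)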
webscout/Provider/OPENAI/textpollinations.py
CHANGED
@@ -276,26 +276,23 @@ class TextPollinations(OpenAICompatibleProvider):
     """
 
     AVAILABLE_MODELS = [
-        "deepseek",
         "deepseek-reasoning",
-        "
-        "
+        "glm",
+        "gpt-5-nano",
         "llama-fast-roblox",
         "llama-roblox",
         "llamascout",
         "mistral",
         "mistral-nemo-roblox",
         "mistral-roblox",
+        "nova-fast",
         "openai",
         "openai-audio",
         "openai-fast",
         "openai-large",
-        "openai-reasoning",
         "openai-roblox",
-        "phi",
         "qwen-coder",
         "bidara",
-        "elixposearch",
         "evil",
         "hypnosis-tracy",
         "midijourney",
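Since aliases such as "deepseek", "openai-reasoning", and "phi" were dropped in this release, callers may want to validate a requested alias against the class attribute before use. A minimal sketch, assuming the import path matches the file layout above:

    from webscout.Provider.OPENAI.textpollinations import TextPollinations  # path assumed

    requested = "deepseek"  # removed in 8.3.6
    if requested not in TextPollinations.AVAILABLE_MODELS:
        requested = "openai"  # fall back to an alias still in the list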
webscout/Provider/TTI/bing.py
CHANGED
@@ -20,7 +20,7 @@ class Images(BaseImages):
     def create(
         self,
         *,
-        model: str = "
+        model: str = "dalle",
         prompt: str,
         n: int = 1,
         size: str = "1024x1024",
@@ -42,10 +42,21 @@ class Images(BaseImages):
         headers = self._client.headers
         images = []
         urls = []
+
+        # Map model names to Bing model codes
+        model_mapping = {
+            "dalle": "0",
+            "gpt4o": "1",
+        }
+
+        # Get the appropriate model code
+        model_code = model_mapping.get(model.lower(), "4")
+
         for _ in range(n):
             data = {
                 "q": prompt,
                 "rt": "4",
+                "mdl": model_code,
                 "FORM": "GENCRE"
             }
             response = session.post(
@@ -221,8 +232,9 @@ class BingImageAI(TTICompatibleProvider):
 
 if __name__ == "__main__":
     from rich import print
-    client = BingImageAI(cookie="
+    client = BingImageAI(cookie="1QyBY4Z1eHBW6fbI25kdM5TrlRGWzn5PFySapCOfvvz04zaounFG660EipVJSOXXvcdeXXLwsWHdDI8bNymucF_QnMHSlY1mc0pPI7e9Ar6o-_7e9Ik5QOe1nkJIe5vz22pibioTqx0IfVKwmVbX22A3bFD7ODaSZalKFr-AuxgAaRVod-giTTry6Ei7RVgisF7BHlkMPPwtCeO234ujgug")
     response = client.images.create(
+        model="gpt4o",
         prompt="A cat riding a bicycle",
         response_format="url",
         n=4,
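The new "mdl" field comes from a two-entry lookup, with anything unrecognized falling back to code "4". A quick standalone check of that logic:

    model_mapping = {"dalle": "0", "gpt4o": "1"}
    for name in ("dalle", "GPT4O", "flux"):
        print(name, "->", model_mapping.get(name.lower(), "4"))
    # dalle -> 0, GPT4O -> 1 (the lookup lower-cases first), flux -> 4 (fallback)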
webscout/Provider/TTI/together.py
CHANGED
@@ -202,24 +202,25 @@ class Images(BaseImages):
 class TogetherImage(TTICompatibleProvider):
     """
     Together.xyz Text-to-Image provider
-    Updated: 2025-
+    Updated: 2025-08-01 10:42:41 UTC by OEvortex
     Supports FLUX and other image generation models
     """
 
     # Image models from Together.xyz API (filtered for image type only)
     AVAILABLE_MODELS = [
-        "black-forest-labs/FLUX.1-
-        "black-forest-labs/FLUX.1.1-pro",
-        "black-forest-labs/FLUX.1-redux",
-        "black-forest-labs/FLUX.1-dev-lora",
-        "black-forest-labs/FLUX.1-schnell",
+        "black-forest-labs/FLUX.1-canny",
         "black-forest-labs/FLUX.1-depth",
-        "black-forest-labs/FLUX.1-kontext-dev",
         "black-forest-labs/FLUX.1-dev",
-        "black-forest-labs/FLUX.1-
+        "black-forest-labs/FLUX.1-dev-lora",
+        "black-forest-labs/FLUX.1-kontext-dev",
         "black-forest-labs/FLUX.1-kontext-max",
+        "black-forest-labs/FLUX.1-kontext-pro",
+        "black-forest-labs/FLUX.1-krea-dev",
+        "black-forest-labs/FLUX.1-pro",
+        "black-forest-labs/FLUX.1-redux",
+        "black-forest-labs/FLUX.1-schnell",
         "black-forest-labs/FLUX.1-schnell-Free",
-        "black-forest-labs/FLUX.1-
+        "black-forest-labs/FLUX.1.1-pro"
     ]
 
     def __init__(self):
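Only FLUX.1-schnell-Free in the list above is marked free. A hedged sketch of selecting it, assuming TogetherImage exposes the same images.create interface shown for BingImageAI earlier in this diff:

    from webscout.Provider.TTI.together import TogetherImage  # path assumed from the file list

    client = TogetherImage()
    free = [m for m in TogetherImage.AVAILABLE_MODELS if m.endswith("-Free")]
    response = client.images.create(
        model=free[0],  # "black-forest-labs/FLUX.1-schnell-Free"
        prompt="A lighthouse at dusk",
        n=1,
    )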
webscout/Provider/TTS/README.md
CHANGED