webscout-7.8-py3-none-any.whl → webscout-7.9-py3-none-any.whl
This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/Bard.py +5 -25
- webscout/DWEBS.py +476 -476
- webscout/Extra/__init__.py +2 -0
- webscout/Extra/autocoder/__init__.py +1 -1
- webscout/Extra/autocoder/{rawdog.py → autocoder.py} +849 -849
- webscout/Extra/tempmail/__init__.py +26 -0
- webscout/Extra/tempmail/async_utils.py +141 -0
- webscout/Extra/tempmail/base.py +156 -0
- webscout/Extra/tempmail/cli.py +187 -0
- webscout/Extra/tempmail/mail_tm.py +361 -0
- webscout/Extra/tempmail/temp_mail_io.py +292 -0
- webscout/Provider/Deepinfra.py +288 -286
- webscout/Provider/ElectronHub.py +709 -716
- webscout/Provider/ExaChat.py +20 -5
- webscout/Provider/Gemini.py +167 -165
- webscout/Provider/Groq.py +38 -24
- webscout/Provider/LambdaChat.py +2 -1
- webscout/Provider/TextPollinationsAI.py +232 -230
- webscout/Provider/__init__.py +0 -4
- webscout/Provider/copilot.py +427 -427
- webscout/Provider/freeaichat.py +8 -1
- webscout/Provider/uncovr.py +312 -299
- webscout/Provider/yep.py +64 -12
- webscout/__init__.py +38 -36
- webscout/cli.py +293 -293
- webscout/conversation.py +350 -17
- webscout/litprinter/__init__.py +59 -667
- webscout/optimizers.py +419 -419
- webscout/update_checker.py +14 -12
- webscout/version.py +1 -1
- webscout/webscout_search.py +1282 -1282
- webscout/webscout_search_async.py +813 -813
- {webscout-7.8.dist-info → webscout-7.9.dist-info}/METADATA +44 -39
- {webscout-7.8.dist-info → webscout-7.9.dist-info}/RECORD +38 -35
- webscout/Provider/DARKAI.py +0 -225
- webscout/Provider/EDITEE.py +0 -192
- webscout/litprinter/colors.py +0 -54
- {webscout-7.8.dist-info → webscout-7.9.dist-info}/LICENSE.md +0 -0
- {webscout-7.8.dist-info → webscout-7.9.dist-info}/WHEEL +0 -0
- {webscout-7.8.dist-info → webscout-7.9.dist-info}/entry_points.txt +0 -0
- {webscout-7.8.dist-info → webscout-7.9.dist-info}/top_level.txt +0 -0
webscout/Provider/ExaChat.py
CHANGED
@@ -18,6 +18,7 @@ MODEL_CONFIGS = {
         "endpoint": "https://exa-chat.vercel.app/api/gemini",
         "models": [
             "gemini-2.0-flash",
+            "gemini-2.0-flash-exp-image-generation",
             "gemini-2.0-flash-thinking-exp-01-21",
             "gemini-2.5-pro-exp-03-25",
             "gemini-2.0-pro-exp-02-05",
@@ -51,6 +52,13 @@ MODEL_CONFIGS = {
             "qwen-qwq-32b"
         ],
     },
+    "cerebras": {
+        "endpoint": "https://exa-chat.vercel.app/api/cerebras",
+        "models": [
+            "llama3.1-8b",
+            "llama-3.3-70b"
+        ],
+    },
 }

 class ExaChat(Provider):
@@ -87,7 +95,11 @@ class ExaChat(Provider):
         "llama3-8b-8192",
         "qwen-2.5-32b",
         "qwen-2.5-coder-32b",
-        "qwen-qwq-32b"
+        "qwen-qwq-32b",
+
+        # Cerebras Models
+        "llama3.1-8b",
+        "llama-3.3-70b"
     ]

     def __init__(
@@ -206,6 +218,12 @@ class ExaChat(Provider):
                 "model": self.model,
                 "messages": []
             }
+        elif self.provider == "cerebras":
+            return {
+                "query": conversation_prompt,
+                "model": self.model,
+                "messages": []
+            }
         else: # openrouter or groq
             return {
                 "query": conversation_prompt + "\n", # Add newline for openrouter and groq models
@@ -246,10 +264,7 @@
                     full_response += content
                 except json.JSONDecodeError:
                     continue
-
-        if not raw:
-            print() # New line after response
-
+
         self.last_response = {"text": full_response}
         self.conversation.update_chat_history(prompt, full_response)
         return self.last_response
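The new "cerebras" entry mirrors the other MODEL_CONFIGS providers (an endpoint plus a model list), and its request payload uses the same query/model/messages shape as the gemini branch. A minimal usage sketch follows; ExaChat's __init__ and chat signatures are not part of this diff, so the `model` keyword and `chat` call are assumptions based on webscout's other providers.

# Sketch only: the constructor keyword and chat() method below are
# assumptions; neither signature appears in the hunks above.
from webscout.Provider.ExaChat import ExaChat

# "llama-3.3-70b" is one of the two models routed to the new
# https://exa-chat.vercel.app/api/cerebras endpoint via MODEL_CONFIGS.
ai = ExaChat(model="llama-3.3-70b")
print(ai.chat("Hello"))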
webscout/Provider/Gemini.py
CHANGED
@@ -1,165 +1,167 @@
-from os import path
-from json import load, dumps
-import warnings
-from typing import Union, Any, Dict
-
-# Import internal modules and dependencies
-from ..AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream
-from ..AIbase import Provider, AsyncProvider
-from ..Bard import Chatbot, Model
-
-warnings.simplefilter("ignore", category=UserWarning)
-
-# Define model aliases for easy usage
-MODEL_ALIASES: Dict[str, Model] = {
-    "unspecified": Model.UNSPECIFIED,
-    "flash": Model.G_2_0_FLASH,
-[old lines 17-165 truncated in this diff view; surviving fragments include "flash-…" and "exp-advanced": Model.…]
+from os import path
+from json import load, dumps
+import warnings
+from typing import Union, Any, Dict
+
+# Import internal modules and dependencies
+from ..AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream
+from ..AIbase import Provider, AsyncProvider
+from ..Bard import Chatbot, Model
+
+warnings.simplefilter("ignore", category=UserWarning)
+
+# Define model aliases for easy usage
+MODEL_ALIASES: Dict[str, Model] = {
+    "unspecified": Model.UNSPECIFIED,
+    "gemini-2.0-flash": Model.G_2_0_FLASH,
+    "gemini-2.0-flash-thinking": Model.G_2_0_FLASH_THINKING,
+    "gemini-2.5-pro": Model.G_2_5_PRO,
+    "gemini-2.0-exp-advanced": Model.G_2_0_EXP_ADVANCED,
+    "gemini-2.5-exp-advanced": Model.G_2_5_EXP_ADVANCED,
+    # Add shorter aliases for convenience
+    "flash": Model.G_2_0_FLASH,
+    "thinking": Model.G_2_0_FLASH_THINKING,
+    "pro": Model.G_2_5_PRO,
+    "advanced": Model.G_2_0_EXP_ADVANCED,
+    "advanced-2.5": Model.G_2_5_EXP_ADVANCED,
+}
+
+# List of available models (friendly names)
+AVAILABLE_MODELS = list(MODEL_ALIASES.keys())
+
+class GEMINI(Provider):
+    def __init__(
+        self,
+        cookie_file: str,
+        model: str = "flash", # Accepts either a Model enum or a str alias.
+        proxy: dict = {},
+        timeout: int = 30,
+    ):
+        """
+        Initializes GEMINI with model support.
+
+        Args:
+            cookie_file (str): Path to the cookies JSON file.
+            model (Model or str): Selected model for the session. Can be a Model enum
+                or a string alias. Available aliases: flash, flash-exp, thinking, thinking-with-apps,
+                exp-advanced, 2.5-exp-advanced, 2.5-pro, 1.5-flash, 1.5-pro, 1.5-pro-research.
+            proxy (dict, optional): HTTP request proxy. Defaults to {}.
+            timeout (int, optional): HTTP request timeout in seconds. Defaults to 30.
+        """
+        self.conversation = Conversation(False)
+
+        # Ensure cookie_file existence.
+        if not isinstance(cookie_file, str):
+            raise TypeError(f"cookie_file should be of type str, not '{type(cookie_file)}'")
+        if not path.isfile(cookie_file):
+            raise Exception(f"{cookie_file} is not a valid file path")
+
+        # If model is provided as alias (str), convert to Model enum.
+        if isinstance(model, str):
+            alias = model.lower()
+            if alias in MODEL_ALIASES:
+                selected_model = MODEL_ALIASES[alias]
+            else:
+                raise Exception(f"Unknown model alias: '{model}'. Available aliases: {', '.join(AVAILABLE_MODELS)}")
+        elif isinstance(model, Model):
+            selected_model = model
+        else:
+            raise TypeError("model must be a string alias or an instance of Model")
+
+        # Initialize the Chatbot session using the cookie file.
+        self.session = Chatbot(cookie_file, proxy, timeout, selected_model)
+        self.last_response = {}
+        self.__available_optimizers = (
+            method for method in dir(Optimizers) if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        # Store cookies from Chatbot for later use (e.g. image generation)
+        self.session_auth1 = self.session.secure_1psid
+        self.session_auth2 = self.session.secure_1psidts
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI.
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name (e.g., 'code', 'shell_command'). Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+
+        Returns:
+            dict: Response generated by the underlying Chatbot.
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {', '.join(self.__available_optimizers)}")
+
+        def for_stream():
+            response = self.session.ask(prompt)
+            self.last_response.update(response)
+            self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
+            yield dumps(response) if raw else response
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response text.
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+
+        Returns:
+            str: Response generated.
+        """
+        def for_stream():
+            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally))
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message content from the response.
+
+        Args:
+            response (dict): Response generated by `self.ask`.
+
+        Returns:
+            str: Extracted message content.
+        """
+        if not isinstance(response, dict):
+            raise TypeError("Response should be of type dict")
+        return response["content"]
+
+    def reset(self):
+        """Reset the current conversation."""
+        self.session.async_chatbot.conversation_id = ""
+        self.session.async_chatbot.response_id = ""
+        self.session.async_chatbot.choice_id = ""
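The rewritten module resolves both full model names and short aliases to the same Model enum through MODEL_ALIASES, and an unknown alias raises an exception listing the valid names. A short usage sketch based on the signatures above; the cookie-file path is a placeholder, and the import path simply mirrors the module's location.

from webscout.Provider.Gemini import GEMINI

# "flash" and "gemini-2.0-flash" both resolve to Model.G_2_0_FLASH
# via MODEL_ALIASES; __init__ raises if the cookie file does not exist.
bot = GEMINI(cookie_file="cookies.json", model="flash")  # placeholder path
print(bot.chat("Hello there"))  # returns the response text via get_message()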
webscout/Provider/Groq.py
CHANGED
@@ -16,26 +16,31 @@ class GROQ(Provider):
     """

     AVAILABLE_MODELS = [
-        [truncated in diff view]
+        "distil-whisper-large-v3-en",
+        "gemma2-9b-it",
+        "llama-3.3-70b-versatile",
+        "llama-3.1-8b-instant",
+        "llama-guard-3-8b",
         "llama3-70b-8192",
-        "… [truncated in diff view]
+        "llama3-8b-8192",
+        "whisper-large-v3",
+        "whisper-large-v3-turbo",
+        "meta-llama/llama-4-scout-17b-16e-instruct",
+        "meta-llama/llama-4-maverick-17b-128e-instruct",
+        "playai-tts",
+        "playai-tts-arabic",
+        "qwen-qwq-32b",
+        "mistral-saba-24b",
         "qwen-2.5-coder-32b",
+        "qwen-2.5-32b",
         "deepseek-r1-distill-qwen-32b",
         "deepseek-r1-distill-llama-70b",
+        "llama-3.3-70b-specdec",
+        "llama-3.2-1b-preview",
         "llama-3.2-3b-preview",
-        "gemma2-9b-it",
-        "llama-3.2-11b-vision-preview",
-        "llama3-8b-8192",
-        "llama-3.3-70b-versatile",
         "llama-3.2-11b-vision-preview",
-        # "distil-whisper-large-v3-en",
-        "mixtral-8x7b-32768",
-        "llama-3.3-70b-specdec",
         "llama-3.2-90b-vision-preview",
-        "… [truncated in diff view]
-        # "whisper-large-v3-turbo",
-        "llama-3.1-8b-instant",
-        "llama-guard-3-8b"
+        "mixtral-8x7b-32768"
     ]

     def __init__(
@@ -345,22 +350,31 @@ class AsyncGROQ(AsyncProvider):
     """

     AVAILABLE_MODELS = [
-        [truncated in diff view]
-        "llama3-70b-8192",
-        "llama-3.2-3b-preview",
+        "distil-whisper-large-v3-en",
         "gemma2-9b-it",
-        "llama-3.2-11b-vision-preview",
-        "llama3-8b-8192",
         "llama-3.3-70b-versatile",
+        "llama-3.1-8b-instant",
+        "llama-guard-3-8b",
+        "llama3-70b-8192",
+        "llama3-8b-8192",
+        "whisper-large-v3",
+        "whisper-large-v3-turbo",
+        "meta-llama/llama-4-scout-17b-16e-instruct",
+        "meta-llama/llama-4-maverick-17b-128e-instruct",
+        "playai-tts",
+        "playai-tts-arabic",
+        "qwen-qwq-32b",
+        "mistral-saba-24b",
+        "qwen-2.5-coder-32b",
+        "qwen-2.5-32b",
+        "deepseek-r1-distill-qwen-32b",
         "deepseek-r1-distill-llama-70b",
-        # "distil-whisper-large-v3-en",
-        "mixtral-8x7b-32768",
         "llama-3.3-70b-specdec",
-        "llama-3.2-90b-vision-preview",
         "llama-3.2-1b-preview",
-        [truncated in diff view]
-        "llama-3.… [truncated in diff view]
-        "llama-… [truncated in diff view]
+        "llama-3.2-3b-preview",
+        "llama-3.2-11b-vision-preview",
+        "llama-3.2-90b-vision-preview",
+        "mixtral-8x7b-32768"
     ]

     def __init__(
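Both GROQ and AsyncGROQ now expose the same reordered AVAILABLE_MODELS list as a class attribute, so a model string can be validated before a client is constructed. A sketch under stated assumptions: the constructor arguments and the chat() call are outside the changed hunks, so they are guesses based on webscout's Provider convention.

from webscout.Provider.Groq import GROQ

model = "meta-llama/llama-4-scout-17b-16e-instruct"  # added in 7.9
assert model in GROQ.AVAILABLE_MODELS  # class attribute, per the hunk above

# api_key and the chat() call are assumptions; __init__ is not
# part of this diff.
ai = GROQ(api_key="gsk_...", model=model)
print(ai.chat("Hello"))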
webscout/Provider/LambdaChat.py
CHANGED