webscout 7.7-py3-none-any.whl → 7.9-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIutel.py +2 -1
- webscout/Bard.py +12 -29
- webscout/DWEBS.py +477 -461
- webscout/Extra/__init__.py +2 -0
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/{rawdog.py → autocoder.py} +849 -790
- webscout/Extra/autocoder/autocoder_utiles.py +332 -194
- webscout/Extra/gguf.py +682 -682
- webscout/Extra/tempmail/__init__.py +26 -0
- webscout/Extra/tempmail/async_utils.py +141 -0
- webscout/Extra/tempmail/base.py +156 -0
- webscout/Extra/tempmail/cli.py +187 -0
- webscout/Extra/tempmail/mail_tm.py +361 -0
- webscout/Extra/tempmail/temp_mail_io.py +292 -0
- webscout/Provider/AI21.py +1 -1
- webscout/Provider/AISEARCH/DeepFind.py +2 -2
- webscout/Provider/AISEARCH/ISou.py +2 -2
- webscout/Provider/AISEARCH/felo_search.py +6 -6
- webscout/Provider/AISEARCH/genspark_search.py +1 -1
- webscout/Provider/Aitopia.py +292 -0
- webscout/Provider/AllenAI.py +1 -1
- webscout/Provider/Andi.py +3 -3
- webscout/Provider/C4ai.py +1 -1
- webscout/Provider/ChatGPTES.py +3 -5
- webscout/Provider/ChatGPTGratis.py +4 -4
- webscout/Provider/Chatify.py +2 -2
- webscout/Provider/Cloudflare.py +3 -2
- webscout/Provider/DeepSeek.py +2 -2
- webscout/Provider/Deepinfra.py +288 -286
- webscout/Provider/ElectronHub.py +709 -634
- webscout/Provider/ExaChat.py +325 -0
- webscout/Provider/Free2GPT.py +2 -2
- webscout/Provider/Gemini.py +167 -179
- webscout/Provider/GithubChat.py +1 -1
- webscout/Provider/Glider.py +4 -4
- webscout/Provider/Groq.py +41 -27
- webscout/Provider/HF_space/qwen_qwen2.py +1 -1
- webscout/Provider/HeckAI.py +1 -1
- webscout/Provider/HuggingFaceChat.py +1 -1
- webscout/Provider/Hunyuan.py +1 -1
- webscout/Provider/Jadve.py +3 -3
- webscout/Provider/Koboldai.py +3 -3
- webscout/Provider/LambdaChat.py +3 -2
- webscout/Provider/Llama.py +3 -5
- webscout/Provider/Llama3.py +4 -12
- webscout/Provider/Marcus.py +3 -3
- webscout/Provider/OLLAMA.py +8 -8
- webscout/Provider/Openai.py +7 -3
- webscout/Provider/PI.py +1 -1
- webscout/Provider/Perplexitylabs.py +1 -1
- webscout/Provider/Phind.py +1 -1
- webscout/Provider/PizzaGPT.py +1 -1
- webscout/Provider/QwenLM.py +4 -7
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +3 -1
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +3 -3
- webscout/Provider/TTI/ImgSys/__init__.py +23 -0
- webscout/Provider/TTI/ImgSys/async_imgsys.py +202 -0
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +195 -0
- webscout/Provider/TTI/__init__.py +3 -1
- webscout/Provider/TTI/artbit/async_artbit.py +1 -1
- webscout/Provider/TTI/artbit/sync_artbit.py +1 -1
- webscout/Provider/TTI/huggingface/async_huggingface.py +1 -1
- webscout/Provider/TTI/huggingface/sync_huggingface.py +1 -1
- webscout/Provider/TTI/piclumen/__init__.py +22 -22
- webscout/Provider/TTI/piclumen/sync_piclumen.py +232 -232
- webscout/Provider/TTI/pixelmuse/__init__.py +4 -0
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +249 -0
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +182 -0
- webscout/Provider/TTI/talkai/sync_talkai.py +1 -1
- webscout/Provider/TTS/utils.py +1 -1
- webscout/Provider/TeachAnything.py +1 -1
- webscout/Provider/TextPollinationsAI.py +232 -230
- webscout/Provider/TwoAI.py +1 -2
- webscout/Provider/Venice.py +4 -2
- webscout/Provider/VercelAI.py +234 -0
- webscout/Provider/WebSim.py +3 -2
- webscout/Provider/WiseCat.py +10 -12
- webscout/Provider/Youchat.py +1 -1
- webscout/Provider/__init__.py +10 -4
- webscout/Provider/ai4chat.py +1 -1
- webscout/Provider/aimathgpt.py +2 -6
- webscout/Provider/akashgpt.py +1 -1
- webscout/Provider/askmyai.py +4 -4
- webscout/Provider/{DARKAI.py → asksteve.py} +56 -77
- webscout/Provider/bagoodex.py +2 -2
- webscout/Provider/cerebras.py +1 -1
- webscout/Provider/chatglm.py +4 -4
- webscout/Provider/cleeai.py +1 -0
- webscout/Provider/copilot.py +21 -9
- webscout/Provider/elmo.py +1 -1
- webscout/Provider/flowith.py +1 -1
- webscout/Provider/freeaichat.py +64 -31
- webscout/Provider/gaurish.py +3 -5
- webscout/Provider/geminiprorealtime.py +1 -1
- webscout/Provider/granite.py +4 -4
- webscout/Provider/hermes.py +5 -5
- webscout/Provider/julius.py +1 -1
- webscout/Provider/koala.py +1 -1
- webscout/Provider/lepton.py +1 -1
- webscout/Provider/llama3mitril.py +4 -4
- webscout/Provider/llamatutor.py +1 -1
- webscout/Provider/llmchat.py +3 -3
- webscout/Provider/meta.py +1 -1
- webscout/Provider/multichat.py +10 -10
- webscout/Provider/promptrefine.py +1 -1
- webscout/Provider/searchchat.py +293 -0
- webscout/Provider/sonus.py +2 -2
- webscout/Provider/talkai.py +2 -2
- webscout/Provider/turboseek.py +1 -1
- webscout/Provider/tutorai.py +1 -1
- webscout/Provider/typegpt.py +5 -42
- webscout/Provider/uncovr.py +312 -297
- webscout/Provider/x0gpt.py +1 -1
- webscout/Provider/yep.py +64 -12
- webscout/__init__.py +3 -1
- webscout/cli.py +59 -98
- webscout/conversation.py +350 -17
- webscout/litprinter/__init__.py +59 -667
- webscout/optimizers.py +419 -419
- webscout/tempid.py +11 -11
- webscout/update_checker.py +14 -12
- webscout/utils.py +2 -2
- webscout/version.py +1 -1
- webscout/webscout_search.py +146 -87
- webscout/webscout_search_async.py +148 -27
- {webscout-7.7.dist-info → webscout-7.9.dist-info}/METADATA +92 -66
- webscout-7.9.dist-info/RECORD +248 -0
- webscout/Provider/EDITEE.py +0 -192
- webscout/litprinter/colors.py +0 -54
- webscout-7.7.dist-info/RECORD +0 -234
- {webscout-7.7.dist-info → webscout-7.9.dist-info}/LICENSE.md +0 -0
- {webscout-7.7.dist-info → webscout-7.9.dist-info}/WHEEL +0 -0
- {webscout-7.7.dist-info → webscout-7.9.dist-info}/entry_points.txt +0 -0
- {webscout-7.7.dist-info → webscout-7.9.dist-info}/top_level.txt +0 -0
webscout/Provider/ExaChat.py
ADDED
@@ -0,0 +1,325 @@
import requests
import json
import uuid
from typing import Any, Dict, Union, Optional
from datetime import datetime
from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
from webscout.AIbase import Provider
from webscout import exceptions
from webscout.litagent import LitAgent

# Model configurations
MODEL_CONFIGS = {
    "exaanswer": {
        "endpoint": "https://exa-chat.vercel.app/api/exaanswer",
        "models": ["exaanswer"],
    },
    "gemini": {
        "endpoint": "https://exa-chat.vercel.app/api/gemini",
        "models": [
            "gemini-2.0-flash",
            "gemini-2.0-flash-exp-image-generation",
            "gemini-2.0-flash-thinking-exp-01-21",
            "gemini-2.5-pro-exp-03-25",
            "gemini-2.0-pro-exp-02-05",
        ],
    },
    "openrouter": {
        "endpoint": "https://exa-chat.vercel.app/api/openrouter",
        "models": [
            "mistralai/mistral-small-3.1-24b-instruct:free",
            "deepseek/deepseek-r1:free",
            "deepseek/deepseek-chat-v3-0324:free",
            "google/gemma-3-27b-it:free",
        ],
    },
    "groq": {
        "endpoint": "https://exa-chat.vercel.app/api/groq",
        "models": [
            "deepseek-r1-distill-llama-70b",
            "deepseek-r1-distill-qwen-32b",
            "gemma2-9b-it",
            "llama-3.1-8b-instant",
            "llama-3.2-1b-preview",
            "llama-3.2-3b-preview",
            "llama-3.2-90b-vision-preview",
            "llama-3.3-70b-specdec",
            "llama-3.3-70b-versatile",
            "llama3-70b-8192",
            "llama3-8b-8192",
            "qwen-2.5-32b",
            "qwen-2.5-coder-32b",
            "qwen-qwq-32b"
        ],
    },
    "cerebras": {
        "endpoint": "https://exa-chat.vercel.app/api/cerebras",
        "models": [
            "llama3.1-8b",
            "llama-3.3-70b"
        ],
    },
}

class ExaChat(Provider):
    """
    A class to interact with multiple AI APIs through the Exa Chat interface.
    """
    AVAILABLE_MODELS = [
        # ExaAnswer Models
        "exaanswer",

        # Gemini Models
        "gemini-2.0-flash",
        "gemini-2.0-flash-thinking-exp-01-21",
        "gemini-2.5-pro-exp-03-25",
        "gemini-2.0-pro-exp-02-05",

        # OpenRouter Models
        "mistralai/mistral-small-3.1-24b-instruct:free",
        "deepseek/deepseek-r1:free",
        "deepseek/deepseek-chat-v3-0324:free",
        "google/gemma-3-27b-it:free",

        # Groq Models
        "deepseek-r1-distill-llama-70b",
        "deepseek-r1-distill-qwen-32b",
        "gemma2-9b-it",
        "llama-3.1-8b-instant",
        "llama-3.2-1b-preview",
        "llama-3.2-3b-preview",
        "llama-3.2-90b-vision-preview",
        "llama-3.3-70b-specdec",
        "llama-3.3-70b-versatile",
        "llama3-70b-8192",
        "llama3-8b-8192",
        "qwen-2.5-32b",
        "qwen-2.5-coder-32b",
        "qwen-qwq-32b",

        # Cerebras Models
        "llama3.1-8b",
        "llama-3.3-70b"
    ]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 4000,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        model: str = "exaanswer",
        system_prompt: str = "You are a friendly, helpful AI assistant.",
        temperature: float = 0.5,
        presence_penalty: int = 0,
        frequency_penalty: int = 0,
        top_p: float = 1
    ):
        """Initializes the ExaChat client."""
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.session = requests.Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.system_prompt = system_prompt
        self.temperature = temperature
        self.presence_penalty = presence_penalty
        self.frequency_penalty = frequency_penalty
        self.top_p = top_p

        # Initialize LitAgent for user agent generation
        self.agent = LitAgent()

        self.headers = {
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            "content-type": "application/json",
            "origin": "https://exa-chat.vercel.app",
            "referer": "https://exa-chat.vercel.app/",
            "user-agent": self.agent.random(),
        }

        self.session.headers.update(self.headers)
        self.session.proxies = proxies
        self.session.cookies.update({"session": uuid.uuid4().hex})

        self.__available_optimizers = (
            method for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )

        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

        self.provider = self._get_provider_from_model(self.model)
        self.model_name = self.model

    def _get_endpoint(self) -> str:
        """Get the API endpoint for the current provider."""
        return MODEL_CONFIGS[self.provider]["endpoint"]

    def _get_provider_from_model(self, model: str) -> str:
        """Determine the provider based on the model name."""
        for provider, config in MODEL_CONFIGS.items():
            if model in config["models"]:
                return provider

        available_models = []
        for provider, config in MODEL_CONFIGS.items():
            for model_name in config["models"]:
                available_models.append(f"{provider}/{model_name}")

        error_msg = f"Invalid model: {model}\nAvailable models: {', '.join(available_models)}"
        raise ValueError(error_msg)

    def _make_request(self, payload: Dict[str, Any]) -> requests.Response:
        """Make the API request with proper error handling."""
        try:
            response = self.session.post(
                self._get_endpoint(),
                headers=self.headers,
                json=payload,
                timeout=self.timeout,
            )
            response.raise_for_status()
            return response
        except requests.exceptions.RequestException as e:
            raise exceptions.FailedToGenerateResponseError(f"API request failed: {e}") from e

    def _build_payload(self, conversation_prompt: str) -> Dict[str, Any]:
        """Build the appropriate payload based on the provider."""
        if self.provider == "exaanswer":
            return {
                "query": conversation_prompt,
                "messages": []
            }
        elif self.provider == "gemini":
            return {
                "query": conversation_prompt,
                "model": self.model,
                "messages": []
            }
        elif self.provider == "cerebras":
            return {
                "query": conversation_prompt,
                "model": self.model,
                "messages": []
            }
        else:  # openrouter or groq
            return {
                "query": conversation_prompt + "\n",  # Add newline for openrouter and groq models
                "model": self.model,
                "messages": []
            }

    def ask(
        self,
        prompt: str,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Dict[str, Any]:
        """Sends a prompt to the API and returns the response."""
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                error_msg = f"Optimizer is not one of {self.__available_optimizers}"
                raise exceptions.FailedToGenerateResponseError(error_msg)

        payload = self._build_payload(conversation_prompt)
        response = self._make_request(payload)

        try:
            full_response = ""
            for line in response.iter_lines():
                if line:
                    try:
                        data = json.loads(line.decode('utf-8'))
                        if 'choices' in data and len(data['choices']) > 0:
                            content = data['choices'][0].get('delta', {}).get('content', '')
                            if content:
                                full_response += content
                    except json.JSONDecodeError:
                        continue

            self.last_response = {"text": full_response}
            self.conversation.update_chat_history(prompt, full_response)
            return self.last_response

        except json.JSONDecodeError as e:
            raise exceptions.FailedToGenerateResponseError(f"Invalid JSON response: {e}") from e

    def chat(
        self,
        prompt: str,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str:
        """Generate response."""
        response = self.ask(
            prompt, optimizer=optimizer, conversationally=conversationally
        )
        return self.get_message(response)

    def get_message(self, response: Union[Dict[str, Any], str]) -> str:
        """
        Retrieves message from response.

        Args:
            response (Union[Dict[str, Any], str]): The response to extract the message from

        Returns:
            str: The extracted message text
        """
        if isinstance(response, dict):
            return response.get("text", "")
        return str(response)

if __name__ == "__main__":
    print("-" * 80)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print("-" * 80)

    # Test all available models
    working = 0
    total = len(ExaChat.AVAILABLE_MODELS)

    for model in ExaChat.AVAILABLE_MODELS:
        try:
            test_ai = ExaChat(model=model, timeout=60)
            response = test_ai.chat("Say 'Hello' in one word")
            response_text = response

            if response_text and len(response_text.strip()) > 0:
                status = "✓"
                # Truncate response if too long
                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
            else:
                status = "✗"
                display_text = "Empty or invalid response"
            print(f"{model:<50} {status:<10} {display_text}")
        except Exception as e:
            print(f"{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/Free2GPT.py
CHANGED
@@ -13,7 +13,7 @@ Select the variant by passing the 'variant' parameter in the constructor:
     variant="gpt" --> Uses https://chat1.free2gpt.com/api/generate
 """
 
-from typing import Optional, Dict
+from typing import Union, Optional, Dict
 import time
 import json
 import requests
@@ -23,7 +23,7 @@ from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.Litlogger import Logger, LogFormat
-from webscout import LitAgent
+from webscout.litagent import LitAgent
 
 
 class Free2GPT(Provider):