webscout-7.7-py3-none-any.whl → webscout-7.8-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout has been flagged as potentially problematic.
- webscout/AIutel.py +2 -1
- webscout/Bard.py +14 -11
- webscout/DWEBS.py +431 -415
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder_utiles.py +332 -194
- webscout/Extra/autocoder/rawdog.py +68 -9
- webscout/Extra/gguf.py +682 -682
- webscout/Provider/AI21.py +1 -1
- webscout/Provider/AISEARCH/DeepFind.py +2 -2
- webscout/Provider/AISEARCH/ISou.py +2 -2
- webscout/Provider/AISEARCH/felo_search.py +6 -6
- webscout/Provider/AISEARCH/genspark_search.py +1 -1
- webscout/Provider/Aitopia.py +292 -0
- webscout/Provider/AllenAI.py +1 -1
- webscout/Provider/Andi.py +3 -3
- webscout/Provider/C4ai.py +1 -1
- webscout/Provider/ChatGPTES.py +3 -5
- webscout/Provider/ChatGPTGratis.py +4 -4
- webscout/Provider/Chatify.py +2 -2
- webscout/Provider/Cloudflare.py +3 -2
- webscout/Provider/DARKAI.py +3 -2
- webscout/Provider/DeepSeek.py +2 -2
- webscout/Provider/Deepinfra.py +1 -1
- webscout/Provider/EDITEE.py +1 -1
- webscout/Provider/ElectronHub.py +178 -96
- webscout/Provider/ExaChat.py +310 -0
- webscout/Provider/Free2GPT.py +2 -2
- webscout/Provider/Gemini.py +5 -19
- webscout/Provider/GithubChat.py +1 -1
- webscout/Provider/Glider.py +4 -4
- webscout/Provider/Groq.py +3 -3
- webscout/Provider/HF_space/qwen_qwen2.py +1 -1
- webscout/Provider/HeckAI.py +1 -1
- webscout/Provider/HuggingFaceChat.py +1 -1
- webscout/Provider/Hunyuan.py +1 -1
- webscout/Provider/Jadve.py +3 -3
- webscout/Provider/Koboldai.py +3 -3
- webscout/Provider/LambdaChat.py +1 -1
- webscout/Provider/Llama.py +3 -5
- webscout/Provider/Llama3.py +4 -12
- webscout/Provider/Marcus.py +3 -3
- webscout/Provider/OLLAMA.py +8 -8
- webscout/Provider/Openai.py +7 -3
- webscout/Provider/PI.py +1 -1
- webscout/Provider/Perplexitylabs.py +1 -1
- webscout/Provider/Phind.py +1 -1
- webscout/Provider/PizzaGPT.py +1 -1
- webscout/Provider/QwenLM.py +4 -7
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +3 -1
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +3 -3
- webscout/Provider/TTI/ImgSys/__init__.py +23 -0
- webscout/Provider/TTI/ImgSys/async_imgsys.py +202 -0
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +195 -0
- webscout/Provider/TTI/__init__.py +3 -1
- webscout/Provider/TTI/artbit/async_artbit.py +1 -1
- webscout/Provider/TTI/artbit/sync_artbit.py +1 -1
- webscout/Provider/TTI/huggingface/async_huggingface.py +1 -1
- webscout/Provider/TTI/huggingface/sync_huggingface.py +1 -1
- webscout/Provider/TTI/piclumen/__init__.py +22 -22
- webscout/Provider/TTI/piclumen/sync_piclumen.py +232 -232
- webscout/Provider/TTI/pixelmuse/__init__.py +4 -0
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +249 -0
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +182 -0
- webscout/Provider/TTI/talkai/sync_talkai.py +1 -1
- webscout/Provider/TTS/utils.py +1 -1
- webscout/Provider/TeachAnything.py +1 -1
- webscout/Provider/TextPollinationsAI.py +4 -4
- webscout/Provider/TwoAI.py +1 -2
- webscout/Provider/Venice.py +4 -2
- webscout/Provider/VercelAI.py +234 -0
- webscout/Provider/WebSim.py +3 -2
- webscout/Provider/WiseCat.py +10 -12
- webscout/Provider/Youchat.py +1 -1
- webscout/Provider/__init__.py +10 -0
- webscout/Provider/ai4chat.py +1 -1
- webscout/Provider/aimathgpt.py +2 -6
- webscout/Provider/akashgpt.py +1 -1
- webscout/Provider/askmyai.py +4 -4
- webscout/Provider/asksteve.py +203 -0
- webscout/Provider/bagoodex.py +2 -2
- webscout/Provider/cerebras.py +1 -1
- webscout/Provider/chatglm.py +4 -4
- webscout/Provider/cleeai.py +1 -0
- webscout/Provider/copilot.py +427 -415
- webscout/Provider/elmo.py +1 -1
- webscout/Provider/flowith.py +1 -1
- webscout/Provider/freeaichat.py +57 -31
- webscout/Provider/gaurish.py +3 -5
- webscout/Provider/geminiprorealtime.py +1 -1
- webscout/Provider/granite.py +4 -4
- webscout/Provider/hermes.py +5 -5
- webscout/Provider/julius.py +1 -1
- webscout/Provider/koala.py +1 -1
- webscout/Provider/lepton.py +1 -1
- webscout/Provider/llama3mitril.py +4 -4
- webscout/Provider/llamatutor.py +1 -1
- webscout/Provider/llmchat.py +3 -3
- webscout/Provider/meta.py +1 -1
- webscout/Provider/multichat.py +10 -10
- webscout/Provider/promptrefine.py +1 -1
- webscout/Provider/searchchat.py +293 -0
- webscout/Provider/sonus.py +2 -2
- webscout/Provider/talkai.py +2 -2
- webscout/Provider/turboseek.py +1 -1
- webscout/Provider/tutorai.py +1 -1
- webscout/Provider/typegpt.py +5 -42
- webscout/Provider/uncovr.py +4 -2
- webscout/Provider/x0gpt.py +1 -1
- webscout/__init__.py +36 -36
- webscout/cli.py +293 -332
- webscout/tempid.py +11 -11
- webscout/utils.py +2 -2
- webscout/version.py +1 -1
- webscout/webscout_search.py +1282 -1223
- webscout/webscout_search_async.py +813 -692
- {webscout-7.7.dist-info → webscout-7.8.dist-info}/METADATA +50 -29
- {webscout-7.7.dist-info → webscout-7.8.dist-info}/RECORD +121 -110
- {webscout-7.7.dist-info → webscout-7.8.dist-info}/LICENSE.md +0 -0
- {webscout-7.7.dist-info → webscout-7.8.dist-info}/WHEEL +0 -0
- {webscout-7.7.dist-info → webscout-7.8.dist-info}/entry_points.txt +0 -0
- {webscout-7.7.dist-info → webscout-7.8.dist-info}/top_level.txt +0 -0
webscout/Provider/ExaChat.py
ADDED
@@ -0,0 +1,310 @@
+import requests
+import json
+import uuid
+from typing import Any, Dict, Union, Optional
+from datetime import datetime
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.litagent import LitAgent
+
+# Model configurations
+MODEL_CONFIGS = {
+    "exaanswer": {
+        "endpoint": "https://exa-chat.vercel.app/api/exaanswer",
+        "models": ["exaanswer"],
+    },
+    "gemini": {
+        "endpoint": "https://exa-chat.vercel.app/api/gemini",
+        "models": [
+            "gemini-2.0-flash",
+            "gemini-2.0-flash-thinking-exp-01-21",
+            "gemini-2.5-pro-exp-03-25",
+            "gemini-2.0-pro-exp-02-05",
+        ],
+    },
+    "openrouter": {
+        "endpoint": "https://exa-chat.vercel.app/api/openrouter",
+        "models": [
+            "mistralai/mistral-small-3.1-24b-instruct:free",
+            "deepseek/deepseek-r1:free",
+            "deepseek/deepseek-chat-v3-0324:free",
+            "google/gemma-3-27b-it:free",
+        ],
+    },
+    "groq": {
+        "endpoint": "https://exa-chat.vercel.app/api/groq",
+        "models": [
+            "deepseek-r1-distill-llama-70b",
+            "deepseek-r1-distill-qwen-32b",
+            "gemma2-9b-it",
+            "llama-3.1-8b-instant",
+            "llama-3.2-1b-preview",
+            "llama-3.2-3b-preview",
+            "llama-3.2-90b-vision-preview",
+            "llama-3.3-70b-specdec",
+            "llama-3.3-70b-versatile",
+            "llama3-70b-8192",
+            "llama3-8b-8192",
+            "qwen-2.5-32b",
+            "qwen-2.5-coder-32b",
+            "qwen-qwq-32b"
+        ],
+    },
+}
+
+class ExaChat(Provider):
+    """
+    A class to interact with multiple AI APIs through the Exa Chat interface.
+    """
+    AVAILABLE_MODELS = [
+        # ExaAnswer Models
+        "exaanswer",
+
+        # Gemini Models
+        "gemini-2.0-flash",
+        "gemini-2.0-flash-thinking-exp-01-21",
+        "gemini-2.5-pro-exp-03-25",
+        "gemini-2.0-pro-exp-02-05",
+
+        # OpenRouter Models
+        "mistralai/mistral-small-3.1-24b-instruct:free",
+        "deepseek/deepseek-r1:free",
+        "deepseek/deepseek-chat-v3-0324:free",
+        "google/gemma-3-27b-it:free",
+
+        # Groq Models
+        "deepseek-r1-distill-llama-70b",
+        "deepseek-r1-distill-qwen-32b",
+        "gemma2-9b-it",
+        "llama-3.1-8b-instant",
+        "llama-3.2-1b-preview",
+        "llama-3.2-3b-preview",
+        "llama-3.2-90b-vision-preview",
+        "llama-3.3-70b-specdec",
+        "llama-3.3-70b-versatile",
+        "llama3-70b-8192",
+        "llama3-8b-8192",
+        "qwen-2.5-32b",
+        "qwen-2.5-coder-32b",
+        "qwen-qwq-32b"
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 4000,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "exaanswer",
+        system_prompt: str = "You are a friendly, helpful AI assistant.",
+        temperature: float = 0.5,
+        presence_penalty: int = 0,
+        frequency_penalty: int = 0,
+        top_p: float = 1
+    ):
+        """Initializes the ExaChat client."""
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.system_prompt = system_prompt
+        self.temperature = temperature
+        self.presence_penalty = presence_penalty
+        self.frequency_penalty = frequency_penalty
+        self.top_p = top_p
+
+        # Initialize LitAgent for user agent generation
+        self.agent = LitAgent()
+
+        self.headers = {
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9",
+            "content-type": "application/json",
+            "origin": "https://exa-chat.vercel.app",
+            "referer": "https://exa-chat.vercel.app/",
+            "user-agent": self.agent.random(),
+        }
+
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies
+        self.session.cookies.update({"session": uuid.uuid4().hex})
+
+        self.__available_optimizers = (
+            method for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+        self.provider = self._get_provider_from_model(self.model)
+        self.model_name = self.model
+
+    def _get_endpoint(self) -> str:
+        """Get the API endpoint for the current provider."""
+        return MODEL_CONFIGS[self.provider]["endpoint"]
+
+    def _get_provider_from_model(self, model: str) -> str:
+        """Determine the provider based on the model name."""
+        for provider, config in MODEL_CONFIGS.items():
+            if model in config["models"]:
+                return provider
+
+        available_models = []
+        for provider, config in MODEL_CONFIGS.items():
+            for model_name in config["models"]:
+                available_models.append(f"{provider}/{model_name}")
+
+        error_msg = f"Invalid model: {model}\nAvailable models: {', '.join(available_models)}"
+        raise ValueError(error_msg)
+
+    def _make_request(self, payload: Dict[str, Any]) -> requests.Response:
+        """Make the API request with proper error handling."""
+        try:
+            response = self.session.post(
+                self._get_endpoint(),
+                headers=self.headers,
+                json=payload,
+                timeout=self.timeout,
+            )
+            response.raise_for_status()
+            return response
+        except requests.exceptions.RequestException as e:
+            raise exceptions.FailedToGenerateResponseError(f"API request failed: {e}") from e
+
+    def _build_payload(self, conversation_prompt: str) -> Dict[str, Any]:
+        """Build the appropriate payload based on the provider."""
+        if self.provider == "exaanswer":
+            return {
+                "query": conversation_prompt,
+                "messages": []
+            }
+        elif self.provider == "gemini":
+            return {
+                "query": conversation_prompt,
+                "model": self.model,
+                "messages": []
+            }
+        else:  # openrouter or groq
+            return {
+                "query": conversation_prompt + "\n",  # Add newline for openrouter and groq models
+                "model": self.model,
+                "messages": []
+            }
+
+    def ask(
+        self,
+        prompt: str,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any]:
+        """Sends a prompt to the API and returns the response."""
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                error_msg = f"Optimizer is not one of {self.__available_optimizers}"
+                raise exceptions.FailedToGenerateResponseError(error_msg)
+
+        payload = self._build_payload(conversation_prompt)
+        response = self._make_request(payload)
+
+        try:
+            full_response = ""
+            for line in response.iter_lines():
+                if line:
+                    try:
+                        data = json.loads(line.decode('utf-8'))
+                        if 'choices' in data and len(data['choices']) > 0:
+                            content = data['choices'][0].get('delta', {}).get('content', '')
+                            if content:
+                                full_response += content
+                    except json.JSONDecodeError:
+                        continue
+
+            if not raw:
+                print()  # New line after response
+
+            self.last_response = {"text": full_response}
+            self.conversation.update_chat_history(prompt, full_response)
+            return self.last_response
+
+        except json.JSONDecodeError as e:
+            raise exceptions.FailedToGenerateResponseError(f"Invalid JSON response: {e}") from e
+
+    def chat(
+        self,
+        prompt: str,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response."""
+        response = self.ask(
+            prompt, optimizer=optimizer, conversationally=conversationally
+        )
+        return self.get_message(response)
+
+    def get_message(self, response: Union[Dict[str, Any], str]) -> str:
+        """
+        Retrieves message from response.
+
+        Args:
+            response (Union[Dict[str, Any], str]): The response to extract the message from
+
+        Returns:
+            str: The extracted message text
+        """
+        if isinstance(response, dict):
+            return response.get("text", "")
+        return str(response)
+
+if __name__ == "__main__":
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    # Test all available models
+    working = 0
+    total = len(ExaChat.AVAILABLE_MODELS)
+
+    for model in ExaChat.AVAILABLE_MODELS:
+        try:
+            test_ai = ExaChat(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word")
+            response_text = response
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"{model:<50} {'✗':<10} {str(e)}")
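For orientation, a minimal usage sketch of the new provider follows. The class, model names, and method behavior come from the file above; the import path is assumed from the file layout, and a reachable exa-chat.vercel.app backend is assumed, neither of which this diff verifies.

# Hedged sketch: exercising the new ExaChat provider added in 7.8.
from webscout.Provider.ExaChat import ExaChat  # assumed module path per the file layout above

bot = ExaChat(model="gemini-2.0-flash", timeout=30)  # any entry from AVAILABLE_MODELS
reply = bot.chat("Say 'Hello' in one word")          # chat() returns the extracted message text
print(reply)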
webscout/Provider/Free2GPT.py
CHANGED
@@ -13,7 +13,7 @@ Select the variant by passing the 'variant' parameter in the constructor:
     variant="gpt" --> Uses https://chat1.free2gpt.com/api/generate
 """
 
-from typing import Optional, Dict
+from typing import Union, Optional, Dict
 import time
 import json
 import requests
@@ -23,7 +23,7 @@ from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.Litlogger import Logger, LogFormat
-from webscout import LitAgent
+from webscout.litagent import LitAgent
 
 
 class Free2GPT(Provider):
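The one-line import change above (from webscout import LitAgent → from webscout.litagent import LitAgent) recurs in GithubChat, Glider, HeckAI, HuggingFaceChat, Hunyuan, and LambdaChat below: a package-root re-export is replaced by a direct submodule import. A small sketch of the new form, with the random() call used the same way as in ExaChat above:

# 7.7 style (re-export from the package root):
# from webscout import LitAgent
# 7.8 style (direct submodule import):
from webscout.litagent import LitAgent

agent = LitAgent()
headers = {"user-agent": agent.random()}  # random() usage mirrors the ExaChat file above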
webscout/Provider/Gemini.py
CHANGED
@@ -1,17 +1,13 @@
-
 from os import path
 from json import load, dumps
 import warnings
-from typing import Any, Dict
+from typing import Union, Any, Dict
 
 # Import internal modules and dependencies
 from ..AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream
 from ..AIbase import Provider, AsyncProvider
 from ..Bard import Chatbot, Model
 
-# Import Logger and related classes (assumed similar to what is in yep.py)
-from webscout.Litlogger import Logger, LogFormat
-
 warnings.simplefilter("ignore", category=UserWarning)
 
 # Define model aliases for easy usage
@@ -22,6 +18,7 @@ MODEL_ALIASES: Dict[str, Model] = {
     "thinking": Model.G_2_0_FLASH_THINKING,
     "thinking-with-apps": Model.G_2_0_FLASH_THINKING_WITH_APPS,
     "exp-advanced": Model.G_2_0_EXP_ADVANCED,
+    "2.5-exp-advanced": Model.G_2_5_EXP_ADVANCED,
     "1.5-flash": Model.G_1_5_FLASH,
     "1.5-pro": Model.G_1_5_PRO,
     "1.5-pro-research": Model.G_1_5_PRO_RESEARCH,
@@ -34,28 +31,23 @@ class GEMINI(Provider):
     def __init__(
         self,
         cookie_file: str,
-        model,  # Accepts either a Model enum or a str alias.
+        model: str = "flash",  # Accepts either a Model enum or a str alias.
         proxy: dict = {},
         timeout: int = 30,
-        logging: bool = False  # Flag to enable Logger debugging.
     ):
         """
-        Initializes GEMINI with model support
+        Initializes GEMINI with model support.
 
         Args:
             cookie_file (str): Path to the cookies JSON file.
             model (Model or str): Selected model for the session. Can be a Model enum
                 or a string alias. Available aliases: flash, flash-exp, thinking, thinking-with-apps,
-                exp-advanced, 1.5-flash, 1.5-pro, 1.5-pro-research.
+                exp-advanced, 2.5-exp-advanced, 2.5-pro, 1.5-flash, 1.5-pro, 1.5-pro-research.
             proxy (dict, optional): HTTP request proxy. Defaults to {}.
             timeout (int, optional): HTTP request timeout in seconds. Defaults to 30.
-            logging (bool, optional): Flag to enable Logger debugging. Defaults to False.
         """
         self.conversation = Conversation(False)
 
-        # Initialize Logger only if logging is enabled; otherwise, set to None.
-        self.logger = Logger(name="GEMINI", format=LogFormat.MODERN_EMOJI) if logging else None
-
         # Ensure cookie_file existence.
         if not isinstance(cookie_file, str):
             raise TypeError(f"cookie_file should be of type str, not '{type(cookie_file)}'")
@@ -80,8 +72,6 @@ class GEMINI(Provider):
         self.__available_optimizers = (
            method for method in dir(Optimizers) if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
-        if self.logger:
-            self.logger.debug("GEMINI initialized with model: {}".format(selected_model.model_name))
         # Store cookies from Chatbot for later use (e.g. image generation)
         self.session_auth1 = self.session.secure_1psid
         self.session_auth2 = self.session.secure_1psidts
@@ -126,8 +116,6 @@ class GEMINI(Provider):
                 pass
             return self.last_response
 
-        if self.logger:
-            self.logger.debug(f"Request sent: {prompt}")
         return for_stream() if stream else for_non_stream()
 
     def chat(
@@ -175,5 +163,3 @@ class GEMINI(Provider):
         self.session.async_chatbot.conversation_id = ""
         self.session.async_chatbot.response_id = ""
         self.session.async_chatbot.choice_id = ""
-        if self.logger:
-            self.logger.debug("Conversation reset")
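Given the new "flash" default and the "2.5-exp-advanced" alias above, constructing GEMINI might look like the sketch below. The module path is assumed from the file layout, the cookie file path is hypothetical, and valid Gemini web cookies are required.

# Hedged sketch: the model argument is now optional and accepts the new alias.
from webscout.Provider.Gemini import GEMINI  # assumed module path

bot = GEMINI(cookie_file="cookies.json")  # hypothetical path; defaults to the "flash" alias
pro = GEMINI(cookie_file="cookies.json", model="2.5-exp-advanced")  # alias added in 7.8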
webscout/Provider/GithubChat.py
CHANGED
@@ -8,7 +8,7 @@ from webscout.AIutel import Optimizers
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout import LitAgent
+from webscout.litagent import LitAgent
 
 class GithubChat(Provider):
     """
webscout/Provider/Glider.py
CHANGED
@@ -1,11 +1,11 @@
 import requests
 import json
-from typing import Any, Dict, Generator, Optional
+from typing import Union, Any, Dict, Generator, Optional
 
 from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout import LitAgent as Lit
+from webscout.litagent import LitAgent as Lit
 
 class GliderAI(Provider):
     """
@@ -81,7 +81,7 @@ class GliderAI(Provider):
         raw: bool = False,
         optimizer: Optional[str] = None,
         conversationally: bool = False,
-    ) -> Dict[str, Any]
+    ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
         """Chat with AI.
 
         Args:
@@ -151,7 +151,7 @@ class GliderAI(Provider):
         stream: bool = False,
         optimizer: Optional[str] = None,
         conversationally: bool = False,
-    ) -> str
+    ) -> Union[str, Generator[str, None, None]]:
         """Generate response as a string.
 
         Args:
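The annotation fix above is a pattern repeated through the rest of this release (Groq, Jadve, Koboldai, Llama, Llama3, and Marcus below): methods that either return a finished value or yield a stream previously carried bare return annotations (as rendered in this diff, even lacking the trailing colon), and now declare both shapes with typing.Union. A stripped-down illustration of the pattern, hypothetical and not taken verbatim from any one provider:

from typing import Any, Dict, Generator, Union

# Hypothetical function in the 7.8 annotation style: a dict when stream=False,
# a generator of chunks when stream=True.
def ask(prompt: str, stream: bool = False) -> Union[Dict[str, Any], Generator[Any, None, None]]:
    def chunks() -> Generator[Any, None, None]:
        yield {"text": prompt}
    return chunks() if stream else {"text": prompt}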
webscout/Provider/Groq.py
CHANGED
@@ -1,4 +1,4 @@
-from typing import Any, AsyncGenerator, Dict, Optional, Callable, List
+from typing import Any, AsyncGenerator, Dict, Optional, Callable, List, Union
 
 import httpx
 import requests
@@ -459,7 +459,7 @@ class AsyncGROQ(AsyncProvider):
         optimizer: str = None,
         conversationally: bool = False,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> dict
+    ) -> Union[dict, AsyncGenerator]:
         """Chat with AI asynchronously.
 
         Args:
@@ -606,7 +606,7 @@ class AsyncGROQ(AsyncProvider):
         optimizer: str = None,
         conversationally: bool = False,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> str
+    ) -> Union[str, AsyncGenerator]:
         """Generate response `str` asynchronously.
         Args:
             prompt (str): Prompt to be send.
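AsyncGROQ gets the asynchronous version of the same treatment, pairing Union with AsyncGenerator. A matching sketch, again hypothetical rather than the library's actual method body:

import asyncio
from typing import AsyncGenerator, Union

# Hypothetical async function in the 7.8 style: str when not streaming,
# an async generator of text chunks when streaming.
async def achat(prompt: str, stream: bool = False) -> Union[str, AsyncGenerator]:
    async def chunks():
        yield prompt
    return chunks() if stream else prompt

print(asyncio.run(achat("hello")))  # prints "hello"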
webscout/Provider/HeckAI.py
CHANGED
@@ -9,7 +9,7 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
-from webscout import LitAgent
+from webscout.litagent import LitAgent
 
 class HeckAI(Provider):
     """
webscout/Provider/HuggingFaceChat.py
CHANGED
@@ -9,7 +9,7 @@ from typing import Any, Dict, List, Optional, Union, Generator
 from webscout.AIutel import Conversation
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout import LitAgent
+from webscout.litagent import LitAgent
 
 class HuggingFaceChat(Provider):
     """
webscout/Provider/Hunyuan.py
CHANGED
@@ -11,7 +11,7 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
-from webscout import LitAgent
+from webscout.litagent import LitAgent
 
 class Hunyuan(Provider):
     """
webscout/Provider/Jadve.py
CHANGED
@@ -1,7 +1,7 @@
 import requests
 import json
 import re
-from typing import Any, Dict, Optional, Generator
+from typing import Union, Any, Dict, Optional, Generator
 
 from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider
@@ -105,7 +105,7 @@ class JadveOpenAI(Provider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> dict
+    ) -> Union[dict, Generator[dict, None, None]]:
         """
         Chat with AI.
 
@@ -206,7 +206,7 @@ class JadveOpenAI(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str
+    ) -> Union[str, Generator[str, None, None]]:
         """
         Generate a chat response (string).
 
webscout/Provider/Koboldai.py
CHANGED
@@ -5,7 +5,7 @@ from ..AIutel import Conversation
 from ..AIutel import AwesomePrompts, sanitize_stream
 from ..AIbase import Provider, AsyncProvider
 from webscout import exceptions
-from typing import Any, AsyncGenerator, Dict
+from typing import Union, Any, AsyncGenerator, Dict
 import httpx
 #------------------------------------------------------KOBOLDAI-----------------------------------------------------------
 class KOBOLDAI(Provider):
@@ -266,7 +266,7 @@ class AsyncKOBOLDAI(AsyncProvider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> dict
+    ) -> Union[dict, AsyncGenerator]:
         """Chat with AI asynchronously.
 
         Args:
@@ -338,7 +338,7 @@ class AsyncKOBOLDAI(AsyncProvider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str
+    ) -> Union[str, AsyncGenerator]:
         """Generate response `str` asynchronously.
         Args:
             prompt (str): Prompt to be send.
webscout/Provider/LambdaChat.py
CHANGED
@@ -9,7 +9,7 @@ from typing import Any, Dict, List, Optional, Union, Generator
 from webscout.AIutel import Conversation
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout import LitAgent
+from webscout.litagent import LitAgent
 
 class LambdaChat(Provider):
     """
webscout/Provider/Llama.py
CHANGED
@@ -1,6 +1,4 @@
-
 import requests
-
 import json
 
 from webscout.AIutel import Optimizers
@@ -8,7 +6,7 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
-from typing import Any, AsyncGenerator, Dict
+from typing import Any, AsyncGenerator, Dict, Union
 
 
 class LLAMA(Provider):
@@ -74,7 +72,7 @@ class LLAMA(Provider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> dict
+    ) -> Union[dict, AsyncGenerator]:
         """Chat with AI
 
         Args:
@@ -152,7 +150,7 @@ class LLAMA(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str
+    ) -> Union[str, AsyncGenerator]:
         """Generate response `str`
         Args:
             prompt (str): Prompt to be send.
webscout/Provider/Llama3.py
CHANGED
@@ -1,6 +1,6 @@
 import requests
 import json
-from typing import Any, Dict, Generator
+from typing import Union, Any, Dict, Generator
 
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
@@ -90,7 +90,7 @@ class Sambanova(Provider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> Any
+    ) -> Union[Any, Generator[Any, None, None]]:
         """Chat with AI using the Sambanova API."""
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -171,16 +171,8 @@ class Sambanova(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) ->
-        """Generate response
-        Args:
-            prompt (str): Prompt to be sent.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name. Defaults to None.
-            conversationally (bool, optional): Use conversational tuning with the optimizer. Defaults to False.
-        Returns:
-            str: Generated response, or a generator of strings if streaming.
-        """
+    ) -> Union[str, Generator[str, None, None]]:
+        """Generate response `str`"""
         if stream:
             # For stream mode, yield the text chunks directly
             return self.ask(prompt, stream=True, optimizer=optimizer, conversationally=conversationally)
webscout/Provider/Marcus.py
CHANGED
@@ -1,6 +1,6 @@
 import requests
 import json
-from typing import Any, Dict, Optional, Generator
+from typing import Union, Any, Dict, Optional, Generator
 
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
@@ -69,7 +69,7 @@ class Marcus(Provider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> Dict[str, Any]
+    ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
         """Sends a prompt to the AskMarcus API and returns the response."""
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -119,7 +119,7 @@ class Marcus(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str
+    ) -> Union[str, Generator[str, None, None]]:
         """Generates a response from the AskMarcus API."""
         def for_stream():
             for response_chunk in self.ask(