webscout-7.7-py3-none-any.whl → webscout-7.9-py3-none-any.whl
This diff shows the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIutel.py +2 -1
- webscout/Bard.py +12 -29
- webscout/DWEBS.py +477 -461
- webscout/Extra/__init__.py +2 -0
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/{rawdog.py → autocoder.py} +849 -790
- webscout/Extra/autocoder/autocoder_utiles.py +332 -194
- webscout/Extra/gguf.py +682 -682
- webscout/Extra/tempmail/__init__.py +26 -0
- webscout/Extra/tempmail/async_utils.py +141 -0
- webscout/Extra/tempmail/base.py +156 -0
- webscout/Extra/tempmail/cli.py +187 -0
- webscout/Extra/tempmail/mail_tm.py +361 -0
- webscout/Extra/tempmail/temp_mail_io.py +292 -0
- webscout/Provider/AI21.py +1 -1
- webscout/Provider/AISEARCH/DeepFind.py +2 -2
- webscout/Provider/AISEARCH/ISou.py +2 -2
- webscout/Provider/AISEARCH/felo_search.py +6 -6
- webscout/Provider/AISEARCH/genspark_search.py +1 -1
- webscout/Provider/Aitopia.py +292 -0
- webscout/Provider/AllenAI.py +1 -1
- webscout/Provider/Andi.py +3 -3
- webscout/Provider/C4ai.py +1 -1
- webscout/Provider/ChatGPTES.py +3 -5
- webscout/Provider/ChatGPTGratis.py +4 -4
- webscout/Provider/Chatify.py +2 -2
- webscout/Provider/Cloudflare.py +3 -2
- webscout/Provider/DeepSeek.py +2 -2
- webscout/Provider/Deepinfra.py +288 -286
- webscout/Provider/ElectronHub.py +709 -634
- webscout/Provider/ExaChat.py +325 -0
- webscout/Provider/Free2GPT.py +2 -2
- webscout/Provider/Gemini.py +167 -179
- webscout/Provider/GithubChat.py +1 -1
- webscout/Provider/Glider.py +4 -4
- webscout/Provider/Groq.py +41 -27
- webscout/Provider/HF_space/qwen_qwen2.py +1 -1
- webscout/Provider/HeckAI.py +1 -1
- webscout/Provider/HuggingFaceChat.py +1 -1
- webscout/Provider/Hunyuan.py +1 -1
- webscout/Provider/Jadve.py +3 -3
- webscout/Provider/Koboldai.py +3 -3
- webscout/Provider/LambdaChat.py +3 -2
- webscout/Provider/Llama.py +3 -5
- webscout/Provider/Llama3.py +4 -12
- webscout/Provider/Marcus.py +3 -3
- webscout/Provider/OLLAMA.py +8 -8
- webscout/Provider/Openai.py +7 -3
- webscout/Provider/PI.py +1 -1
- webscout/Provider/Perplexitylabs.py +1 -1
- webscout/Provider/Phind.py +1 -1
- webscout/Provider/PizzaGPT.py +1 -1
- webscout/Provider/QwenLM.py +4 -7
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +3 -1
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +3 -3
- webscout/Provider/TTI/ImgSys/__init__.py +23 -0
- webscout/Provider/TTI/ImgSys/async_imgsys.py +202 -0
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +195 -0
- webscout/Provider/TTI/__init__.py +3 -1
- webscout/Provider/TTI/artbit/async_artbit.py +1 -1
- webscout/Provider/TTI/artbit/sync_artbit.py +1 -1
- webscout/Provider/TTI/huggingface/async_huggingface.py +1 -1
- webscout/Provider/TTI/huggingface/sync_huggingface.py +1 -1
- webscout/Provider/TTI/piclumen/__init__.py +22 -22
- webscout/Provider/TTI/piclumen/sync_piclumen.py +232 -232
- webscout/Provider/TTI/pixelmuse/__init__.py +4 -0
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +249 -0
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +182 -0
- webscout/Provider/TTI/talkai/sync_talkai.py +1 -1
- webscout/Provider/TTS/utils.py +1 -1
- webscout/Provider/TeachAnything.py +1 -1
- webscout/Provider/TextPollinationsAI.py +232 -230
- webscout/Provider/TwoAI.py +1 -2
- webscout/Provider/Venice.py +4 -2
- webscout/Provider/VercelAI.py +234 -0
- webscout/Provider/WebSim.py +3 -2
- webscout/Provider/WiseCat.py +10 -12
- webscout/Provider/Youchat.py +1 -1
- webscout/Provider/__init__.py +10 -4
- webscout/Provider/ai4chat.py +1 -1
- webscout/Provider/aimathgpt.py +2 -6
- webscout/Provider/akashgpt.py +1 -1
- webscout/Provider/askmyai.py +4 -4
- webscout/Provider/{DARKAI.py → asksteve.py} +56 -77
- webscout/Provider/bagoodex.py +2 -2
- webscout/Provider/cerebras.py +1 -1
- webscout/Provider/chatglm.py +4 -4
- webscout/Provider/cleeai.py +1 -0
- webscout/Provider/copilot.py +21 -9
- webscout/Provider/elmo.py +1 -1
- webscout/Provider/flowith.py +1 -1
- webscout/Provider/freeaichat.py +64 -31
- webscout/Provider/gaurish.py +3 -5
- webscout/Provider/geminiprorealtime.py +1 -1
- webscout/Provider/granite.py +4 -4
- webscout/Provider/hermes.py +5 -5
- webscout/Provider/julius.py +1 -1
- webscout/Provider/koala.py +1 -1
- webscout/Provider/lepton.py +1 -1
- webscout/Provider/llama3mitril.py +4 -4
- webscout/Provider/llamatutor.py +1 -1
- webscout/Provider/llmchat.py +3 -3
- webscout/Provider/meta.py +1 -1
- webscout/Provider/multichat.py +10 -10
- webscout/Provider/promptrefine.py +1 -1
- webscout/Provider/searchchat.py +293 -0
- webscout/Provider/sonus.py +2 -2
- webscout/Provider/talkai.py +2 -2
- webscout/Provider/turboseek.py +1 -1
- webscout/Provider/tutorai.py +1 -1
- webscout/Provider/typegpt.py +5 -42
- webscout/Provider/uncovr.py +312 -297
- webscout/Provider/x0gpt.py +1 -1
- webscout/Provider/yep.py +64 -12
- webscout/__init__.py +3 -1
- webscout/cli.py +59 -98
- webscout/conversation.py +350 -17
- webscout/litprinter/__init__.py +59 -667
- webscout/optimizers.py +419 -419
- webscout/tempid.py +11 -11
- webscout/update_checker.py +14 -12
- webscout/utils.py +2 -2
- webscout/version.py +1 -1
- webscout/webscout_search.py +146 -87
- webscout/webscout_search_async.py +148 -27
- {webscout-7.7.dist-info → webscout-7.9.dist-info}/METADATA +92 -66
- webscout-7.9.dist-info/RECORD +248 -0
- webscout/Provider/EDITEE.py +0 -192
- webscout/litprinter/colors.py +0 -54
- webscout-7.7.dist-info/RECORD +0 -234
- {webscout-7.7.dist-info → webscout-7.9.dist-info}/LICENSE.md +0 -0
- {webscout-7.7.dist-info → webscout-7.9.dist-info}/WHEEL +0 -0
- {webscout-7.7.dist-info → webscout-7.9.dist-info}/entry_points.txt +0 -0
- {webscout-7.7.dist-info → webscout-7.9.dist-info}/top_level.txt +0 -0
webscout/Provider/bagoodex.py
CHANGED
@@ -61,7 +61,7 @@ class Bagoodex(Provider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> Dict[str, Any]
+    ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
         """Sends a chat completion request to the Bagoodex API."""

         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
@@ -113,7 +113,7 @@ class Bagoodex(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> Union[str, Generator]:
+    ) -> Union[str, Generator[str, None, None]]:


         def for_stream():
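This widened return annotation recurs across many providers in this release: ask() returns a plain dict for non-streaming calls and a generator when streaming. A minimal calling sketch, assuming the import path follows the file layout and that ask() takes a stream flag like chat() does:

from webscout.Provider.bagoodex import Bagoodex  # import path assumed from the file layout

ai = Bagoodex()
# Non-streaming call: returns Dict[str, Any]
print(ai.ask("Hello"))
# Streaming call: returns a generator of chunks (stream parameter assumed, mirroring chat())
for chunk in ai.ask("Hello", stream=True):
    print(chunk)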
webscout/Provider/cerebras.py
CHANGED
@@ -7,7 +7,7 @@ from typing import Any, Dict, Optional, Generator, List, Union
 from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout import LitAgent as UserAgent
+from webscout.litagent import LitAgent as UserAgent

 class Cerebras(Provider):
     """
webscout/Provider/chatglm.py
CHANGED
@@ -7,8 +7,8 @@ from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
-from webscout import exceptions
-
+from webscout import exceptions
+from webscout.litagent import LitAgent

 class ChatGLM(Provider):
     """
@@ -75,7 +75,7 @@ class ChatGLM(Provider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> Dict[str, Any]
+    ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
         """Chat with AI
         Args:
             prompt (str): Prompt to be sent.
@@ -170,7 +170,7 @@ class ChatGLM(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str
+    ) -> Union[str, Generator[str, None, None]]:
         """Generate response `str`"""

         def for_stream():
webscout/Provider/cleeai.py
CHANGED
webscout/Provider/copilot.py
CHANGED
@@ -13,7 +13,7 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
-from webscout import LitAgent
+from webscout.litagent import LitAgent

 try:
     has_curl_cffi = True
@@ -43,11 +43,11 @@ class Copilot(Provider):
     A class to interact with the Microsoft Copilot API.
     """

-
+    label = "Microsoft Copilot"
     url = "https://copilot.microsoft.com"
     websocket_url = "wss://copilot.microsoft.com/c/api/chat?api-version=2"
     conversation_url = f"{url}/c/api/conversations"
-
+    AVAILABLE_MODELS = ["Copilot", "Think Deeper"]
     _access_token: str = None
     _cookies: dict = None

@@ -226,6 +226,7 @@ class Copilot(Provider):

         # Connect to WebSocket
         wss = session.ws_connect(websocket_url)
+        wss.send(json.dumps({"event": "setOptions", "supportedCards": ["weather", "local", "image", "sports", "video", "ads", "finance"], "ads": {"supportedTypes": ["multimedia", "product", "tourActivity", "propertyPromotion", "text"]}}))
         wss.send(json.dumps({
             "event": "send",
             "conversationId": conversation_id,
@@ -233,7 +234,7 @@ class Copilot(Provider):
                 "type": "text",
                 "text": conversation_prompt,
             }],
-            "mode": "chat"
+            "mode": "reasoning" if "Think" in self.model else "chat"
         }).encode(), CurlWsFlag.TEXT)

         # Process response
@@ -263,6 +264,9 @@ class Copilot(Provider):
                 yield {"type": "image", "url": msg.get("url"), "prompt": image_prompt, "preview": msg.get("thumbnailUrl")}
             elif msg.get("event") == "done":
                 break
+            elif msg.get("event") == "suggestedFollowups":
+                yield {"type": "suggested_followups", "suggestions": msg.get("suggestions")}
+                break
             elif msg.get("event") == "replaceText":
                 content = msg.get("text")
                 streaming_text += content
@@ -270,6 +274,8 @@ class Copilot(Provider):
                 yield resp if raw else resp
             elif msg.get("event") == "error":
                 raise exceptions.FailedToGenerateResponseError(f"Error: {msg}")
+            elif msg.get("event") not in ["received", "startMessage", "citation", "partCompleted"]:
+                print(f"Copilot Message: {msg}")

         if not is_started:
             raise exceptions.FailedToGenerateResponseError(f"Invalid response: {last_msg}")
@@ -310,10 +316,16 @@ class Copilot(Provider):
             for response in self.ask(prompt, True, optimizer=optimizer,
                                      conversationally=conversationally,
                                      images=images, api_key=api_key, **kwargs):
-                if isinstance(response, dict)
-
-
-
+                if isinstance(response, dict):
+                    if "text" in response:
+                        yield response["text"]
+                    elif "type" in response:
+                        if response["type"] == "image":
+                            yield f"\n\n"
+                        elif response["type"] == "suggested_followups":
+                            yield "\nSuggested follow-up questions:\n"
+                            for suggestion in response["suggestions"]:
+                                yield f"- {suggestion}\n"

         def for_non_stream():
             response = self.ask(prompt, False, optimizer=optimizer,
@@ -410,7 +422,7 @@ async def get_nodriver(proxy=None, user_data_dir=None):

 if __name__ == "__main__":
     from rich import print
-    ai = Copilot(timeout=900)
+    ai = Copilot(timeout=900, model="Think Deeper")
     response = ai.chat(input("> "), stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
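Taken together, the copilot.py changes add a model switch ("Copilot" vs. "Think Deeper", mapped to the websocket "chat" and "reasoning" modes) and a new suggested_followups event. A minimal consumption sketch, assuming the import path from the file layout and the dict shapes yielded in the hunks above:

from webscout.Provider.copilot import Copilot  # import path assumed from the file layout

ai = Copilot(timeout=900, model="Think Deeper")  # "Think Deeper" selects the "reasoning" mode
for chunk in ai.ask("Explain recursion", True):  # second positional arg is stream, as chat() calls it
    if isinstance(chunk, dict):
        if "text" in chunk:
            print(chunk["text"], end="", flush=True)
        elif chunk.get("type") == "suggested_followups":
            print("\nFollow-ups:", ", ".join(chunk["suggestions"]))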
webscout/Provider/elmo.py
CHANGED
@@ -3,7 +3,7 @@ from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
-from webscout import LitAgent
+from webscout.litagent import LitAgent

 class Elmo(Provider):
     """
webscout/Provider/flowith.py
CHANGED
@@ -10,7 +10,7 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
-from webscout import LitAgent
+from webscout.litagent import LitAgent

 class Flowith(Provider):
     """
webscout/Provider/freeaichat.py
CHANGED
@@ -8,39 +8,68 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
-from webscout import LitAgent
+from webscout.litagent import LitAgent

 class FreeAIChat(Provider):
     """
-    A class to interact with the FreeAIChat API
+    A class to interact with the FreeAIChat API
     """

     AVAILABLE_MODELS = [
-
-        "
-        "
-        "
-        "
-        "
-
-        "
-
-
-        "
-
-
-        "
-        "GPT-4o-mini",
-        "o1",
-        "o1-mini",
-        "GPT-4o",
-        "Qwen coder",
-        # "Qwen 2.5 72B", >>>> NOT WORKING
-        "Llama 3.1 405B",
-        # "llama3.1-70b-fast", >>>> NOT WORKING
-        # "Llama 3.3 70B", >>>> NOT WORKING
-        "claude 3.5 haiku",
+        # OpenAI Models
+        "GPT 4o",
+        "GPT 4.5 Preview",
+        "GPT 4o Latest",
+        "GPT 4o mini",
+        "GPT 4o Search Preview",
+        "O1",
+        "O1 Mini",
+        "O3 Mini",
+        "O3 Mini High",
+        "O3 Mini Low",
+
+        # Anthropic Models
+        "Claude 3.5 haiku",
         "claude 3.5 sonnet",
+        "Claude 3.7 Sonnet",
+        "Claude 3.7 Sonnet (Thinking)",
+
+        # Deepseek Models
+        "Deepseek R1",
+        "Deepseek R1 Fast",
+        "Deepseek V3",
+        "Deepseek v3 0324",
+
+        # Google Models
+        "Gemini 1.5 Flash",
+        "Gemini 1.5 Pro",
+        "Gemini 2.0 Pro",
+        "Gemini 2.0 Flash",
+        "Gemini 2.5 Pro",
+
+        # Llama Models
+        "Llama 3.1 405B",
+        "Llama 3.1 70B Fast",
+        "Llama 3.3 70B",
+        "Llama 3.2 90B Vision",
+        "Llama 4 Scout",
+        "Llama 4 Maverick",
+
+        # Mistral Models
+        "Mistral Large",
+        "Mistral Nemo",
+        "Mixtral 8x22B",
+
+        # Qwen Models
+        "Qwen Max",
+        "Qwen Plus",
+        "Qwen Turbo",
+        "QwQ 32B",
+        "QwQ Plus",
+
+        # XAI Models
+        "Grok 2",
+        "Grok 3",
     ]

     def __init__(
@@ -119,12 +148,16 @@ class FreeAIChat(Provider):

         messages = [
             {
-                "
-                "content": self.system_prompt
-            },
-            {
+                "id": str(int(time.time() * 1000)),
                 "role": "user",
-                "content": conversation_prompt
+                "content": conversation_prompt,
+                "model": {
+                    # "id": "14",
+                    "name": self.model,
+                    # "icon": "https://cdn-avatars.huggingface.co/v1/production/uploads/1620805164087-5ec0135ded25d76864d553f1.png",
+                    # "provider": "openAI",
+                    # "contextWindow": 63920
+                }
             }
         ]
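The payload change above drops the separate system message and sends a single user message carrying its own id and an inline model object. A hedged reconstruction of the resulting message list (the commented-out fields in the hunk are omitted here; the prompt value is illustrative):

import time

messages = [
    {
        "id": str(int(time.time() * 1000)),  # millisecond timestamp used as the message id
        "role": "user",
        "content": "Hello!",  # illustrative prompt text
        "model": {
            "name": "GPT 4o",  # must match an entry in AVAILABLE_MODELS
        },
    }
]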
webscout/Provider/gaurish.py
CHANGED
@@ -1,14 +1,12 @@
 import requests
 import json
-from typing import
-import uuid
+from typing import Dict, Generator, Union

 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
-from webscout.AIbase import Provider
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout import LitAgent
 from webscout.Litlogger import Logger, LogFormat

 class GaurishCerebras(Provider):
webscout/Provider/geminiprorealtime.py
CHANGED
@@ -9,7 +9,7 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
-from webscout import LitAgent
+from webscout.litagent import LitAgent

 class GeminiPro(Provider):
     """
webscout/Provider/granite.py
CHANGED
@@ -1,11 +1,11 @@
 import requests
 import json
-from typing import Any, Dict, Generator
+from typing import Union, Any, Dict, Generator

 from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout import LitAgent as Lit
+from webscout.litagent import LitAgent as Lit

 class IBMGranite(Provider):
     """
@@ -81,7 +81,7 @@ class IBMGranite(Provider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> Dict[str, Any]
+    ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
         """Chat with AI
         Args:
             prompt (str): Prompt to be sent.
@@ -157,7 +157,7 @@ class IBMGranite(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str
+    ) -> Union[str, Generator[str, None, None]]:
         """Generate response as a string using chat method"""
         def for_stream():
             for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
webscout/Provider/hermes.py
CHANGED
@@ -1,13 +1,13 @@
 import requests
 import json
-from typing import Any, Dict, Generator, Optional
+from typing import Union, Any, Dict, Generator, Optional

 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-
+from webscout.litagent import LitAgent
 class NousHermes(Provider):
     """
     A class to interact with the Hermes API.
@@ -56,7 +56,7 @@ class NousHermes(Provider):
             'content-type': 'application/json',
             'origin': 'https://hermes.nousresearch.com',
             'referer': 'https://hermes.nousresearch.com/',
-            'user-agent':
+            'user-agent': LitAgent().random(),
             'cookie': self.cookies
         }

@@ -99,7 +99,7 @@ class NousHermes(Provider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> Dict[str, Any]
+    ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
         """Chat with AI
         Args:
             prompt (str): Prompt to be send.
@@ -169,7 +169,7 @@ class NousHermes(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str
+    ) -> Union[str, Generator[str, None, None]]:
         """Generate response `str`
         Args:
             prompt (str): Prompt to be send.
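The user-agent change in hermes.py is part of a release-wide pattern: hard-coded or empty user-agent values are replaced with LitAgent().random(), imported from the new canonical webscout.litagent path. A minimal sketch of the pattern as the hunk uses it:

from webscout.litagent import LitAgent  # new import path used throughout these diffs

headers = {
    'content-type': 'application/json',
    'origin': 'https://hermes.nousresearch.com',
    'referer': 'https://hermes.nousresearch.com/',
    'user-agent': LitAgent().random(),  # randomized UA string, per the hunk above
    'cookie': '<session cookies here>',  # placeholder; the provider passes self.cookies
}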
webscout/Provider/julius.py
CHANGED
@@ -8,7 +8,7 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
-from typing import Any, AsyncGenerator, Dict
+from typing import Union, Any, AsyncGenerator, Dict


 class Julius(Provider):
webscout/Provider/koala.py
CHANGED
webscout/Provider/lepton.py
CHANGED
@@ -6,7 +6,7 @@ from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
-from webscout import LitAgent as Lit
+from webscout.litagent import LitAgent as Lit
 class Lepton(Provider):
     """
     A class to interact with the Lepton.run API.
webscout/Provider/llama3mitril.py
CHANGED
@@ -1,13 +1,13 @@
 import requests
 import json
 import re
-from typing import Any, Dict, Optional, Generator
+from typing import Union, Any, Dict, Optional, Generator
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout import LitAgent as Lit
+from webscout.litagent import LitAgent as Lit

 class Llama3Mitril(Provider):
     """
@@ -77,7 +77,7 @@ class Llama3Mitril(Provider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> Dict[str, Any]
+    ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
         """Sends a prompt to the Llama3 Mitril API and returns the response."""
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -143,7 +143,7 @@ class Llama3Mitril(Provider):
         stream: bool = True,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str
+    ) -> Union[str, Generator[str, None, None]]:
         """Generates a response from the Llama3 Mitril API."""

         def for_stream():
webscout/Provider/llamatutor.py
CHANGED
@@ -6,7 +6,7 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout import LitAgent as Lit
+from webscout.litagent import LitAgent as Lit

 class LlamaTutor(Provider):
     """
webscout/Provider/llmchat.py
CHANGED
@@ -1,13 +1,13 @@
 import requests
 import json
-from typing import Any, Dict, Optional, Generator, List
+from typing import Union, Any, Dict, Optional, Generator, List

 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout import LitAgent as Lit
+from webscout.litagent import LitAgent as Lit

 class LLMChat(Provider):
     """
@@ -157,7 +157,7 @@ class LLMChat(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str
+    ) -> Union[str, Generator[str, None, None]]:
         """Generate response with logging capabilities"""

         def for_stream():
webscout/Provider/meta.py
CHANGED
@@ -14,7 +14,7 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout import LitAgent as Lit
+from webscout.litagent import LitAgent as Lit
 MAX_RETRIES = 3

 def generate_offline_threading_id() -> str:
webscout/Provider/multichat.py
CHANGED
@@ -1,7 +1,7 @@
 import requests
 import json
 import uuid
-from typing import Any, Dict
+from typing import Any, Dict, Union
 from datetime import datetime
 from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider
@@ -76,11 +76,11 @@ class MultiChatAI(Provider):
         "deepseek-r1-distill-llama-70b",

         # Cohere Models
-        # "command-r", >>>> NOT WORKING
-        # "command", >>>> NOT WORKING
+        # "command-r", # >>>> NOT WORKING
+        # "command", # >>>> NOT WORKING

         # Google Models
-        # "gemini-1.5-flash-002",
+        # "gemini-1.5-flash-002", #>>>> NOT WORKING
         "gemma2-9b-it",
         "gemini-2.0-flash",

@@ -91,13 +91,13 @@ class MultiChatAI(Provider):
         "deepseek-ai/DeepSeek-V3",
         "meta-llama/Meta-Llama-3.1-405B-Instruct",
         "NousResearch/Hermes-3-Llama-3.1-405B",
-        # "gemma-2-27b-it", >>>> NOT WORKING
+        # "gemma-2-27b-it", # >>>> NOT WORKING

         # Mistral Models
-        # "mistral-small-latest", >>>> NOT WORKING
-        # "codestral-latest", >>>> NOT WORKING
-        # "open-mistral-7b", >>>> NOT WORKING
-        # "open-mixtral-8x7b", >>>> NOT WORKING
+        # "mistral-small-latest", # >>>> NOT WORKING
+        # "codestral-latest", # >>>> NOT WORKING
+        # "open-mistral-7b", # >>>> NOT WORKING
+        # "open-mixtral-8x7b", # >>>> NOT WORKING

         # Alibaba Models
         "Qwen/Qwen2.5-72B-Instruct",
@@ -284,7 +284,7 @@ class MultiChatAI(Provider):
         )
         return self.get_message(response)

-    def get_message(self, response: Dict[str, Any]
+    def get_message(self, response: Union[Dict[str, Any], str]) -> str:
        """
        Retrieves message from response.

webscout/Provider/promptrefine.py
CHANGED
@@ -6,7 +6,7 @@ from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
-from webscout import LitAgent as UserAgent
+from webscout.litagent import LitAgent as UserAgent

 class PromptRefine(Provider):
     """
|