webscout 8.2.3-py3-none-any.whl → 8.2.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIutel.py +226 -14
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/Extra/gguf.py +2 -0
- webscout/Extra/tempmail/base.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +4 -0
- webscout/Provider/AISEARCH/scira_search.py +2 -5
- webscout/Provider/Aitopia.py +75 -51
- webscout/Provider/AllenAI.py +181 -147
- webscout/Provider/ChatGPTClone.py +97 -86
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +79 -32
- webscout/Provider/Deepinfra.py +135 -94
- webscout/Provider/ElectronHub.py +103 -39
- webscout/Provider/ExaChat.py +36 -20
- webscout/Provider/GPTWeb.py +103 -47
- webscout/Provider/GithubChat.py +52 -49
- webscout/Provider/GizAI.py +283 -0
- webscout/Provider/Glider.py +39 -28
- webscout/Provider/Groq.py +222 -91
- webscout/Provider/HeckAI.py +93 -69
- webscout/Provider/HuggingFaceChat.py +113 -106
- webscout/Provider/Hunyuan.py +94 -83
- webscout/Provider/Jadve.py +104 -79
- webscout/Provider/LambdaChat.py +142 -123
- webscout/Provider/Llama3.py +94 -39
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +95 -37
- webscout/Provider/Netwrck.py +94 -52
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +286 -0
- webscout/Provider/OPENAI/chatgptclone.py +35 -14
- webscout/Provider/OPENAI/deepinfra.py +37 -0
- webscout/Provider/OPENAI/exachat.py +4 -0
- webscout/Provider/OPENAI/groq.py +354 -0
- webscout/Provider/OPENAI/heckai.py +6 -2
- webscout/Provider/OPENAI/mcpcore.py +376 -0
- webscout/Provider/OPENAI/multichat.py +368 -0
- webscout/Provider/OPENAI/netwrck.py +3 -1
- webscout/Provider/OPENAI/scirachat.py +2 -4
- webscout/Provider/OPENAI/textpollinations.py +20 -22
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/OpenGPT.py +48 -38
- webscout/Provider/PI.py +178 -93
- webscout/Provider/PizzaGPT.py +66 -36
- webscout/Provider/StandardInput.py +42 -30
- webscout/Provider/TeachAnything.py +95 -52
- webscout/Provider/TextPollinationsAI.py +138 -78
- webscout/Provider/TwoAI.py +162 -81
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/Venice.py +97 -58
- webscout/Provider/VercelAI.py +33 -14
- webscout/Provider/WiseCat.py +65 -28
- webscout/Provider/Writecream.py +37 -11
- webscout/Provider/WritingMate.py +135 -63
- webscout/Provider/__init__.py +9 -27
- webscout/Provider/ai4chat.py +6 -7
- webscout/Provider/asksteve.py +53 -44
- webscout/Provider/cerebras.py +77 -31
- webscout/Provider/chatglm.py +47 -37
- webscout/Provider/copilot.py +0 -3
- webscout/Provider/elmo.py +109 -60
- webscout/Provider/granite.py +102 -54
- webscout/Provider/hermes.py +95 -48
- webscout/Provider/koala.py +1 -1
- webscout/Provider/learnfastai.py +113 -54
- webscout/Provider/llama3mitril.py +86 -51
- webscout/Provider/llmchat.py +88 -46
- webscout/Provider/llmchatco.py +110 -115
- webscout/Provider/meta.py +41 -37
- webscout/Provider/multichat.py +67 -28
- webscout/Provider/scira_chat.py +49 -30
- webscout/Provider/scnet.py +106 -53
- webscout/Provider/searchchat.py +87 -88
- webscout/Provider/sonus.py +113 -63
- webscout/Provider/toolbaz.py +115 -82
- webscout/Provider/turboseek.py +90 -43
- webscout/Provider/tutorai.py +82 -64
- webscout/Provider/typefully.py +85 -35
- webscout/Provider/typegpt.py +118 -61
- webscout/Provider/uncovr.py +132 -76
- webscout/Provider/x0gpt.py +69 -26
- webscout/Provider/yep.py +79 -66
- webscout/cli.py +256 -0
- webscout/conversation.py +34 -22
- webscout/exceptions.py +23 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
- webscout-8.2.5.dist-info/entry_points.txt +3 -0
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/gaurish.py +0 -244
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llamatutor.py +0 -192
- webscout-8.2.3.dist-info/entry_points.txt +0 -5
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/llamatutor.py
DELETED
@@ -1,192 +0,0 @@
-import requests
-import json
-
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
-from webscout.AIbase import Provider
-from webscout import exceptions
-from webscout.litagent import LitAgent as Lit
-
-class LlamaTutor(Provider):
-    """
-    A class to interact with the LlamaTutor API (Together.ai)
-    """
-    AVAILABLE_MODELS = ["UNKNOWN"]
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 600,
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-        system_prompt: str = "You are a helpful AI assistant."
-    ):
-        """
-        Initializes the LlamaTutor API with given parameters.
-        """
-
-        self.session = requests.Session()
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.api_endpoint = "https://llamatutor.together.ai/api/getChat"
-        self.stream_chunk_size = 64
-        self.timeout = timeout
-        self.last_response = {}
-        self.system_prompt = system_prompt
-
-        self.headers = {
-            "Content-Type": "application/json",
-            "Accept": "*/*",
-            "Accept-Encoding": "gzip, deflate, br, zstd",
-            "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
-            "DNT": "1",
-            "Origin": "https://llamatutor.together.ai",
-            "Referer": "https://llamatutor.together.ai/",
-            "Sec-Ch-Ua": '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
-            "Sec-Ch-Ua-Mobile": "?0",
-            "Sec-Ch-Ua-Platform": '"Windows"',
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
-            "User-Agent": Lit().random(),
-        }
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-
-        self.session.headers.update(self.headers)
-
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-        self.session.proxies = proxies
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> dict:
-        """Chat with LlamaTutor"""
-
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
-        payload = {
-            "messages": [
-                {
-                    "role": "system",
-                    "content": self.system_prompt
-                },
-                {
-                    "role": "user",
-                    "content": conversation_prompt
-                }
-            ]
-        }
-
-        def for_stream():
-            try:
-
-                response = requests.post(
-                    self.api_endpoint,
-                    headers=self.headers,
-                    data=json.dumps(payload),
-                    stream=True,
-                    timeout=self.timeout
-                )
-                response.raise_for_status()
-
-                full_response = ''
-                for line in response.iter_lines(decode_unicode=True):
-                    if line:
-                        try:
-                            decoded_line = line.decode('utf-8')
-                            if decoded_line.startswith("data: "):
-                                json_data = json.loads(decoded_line[6:])
-                                if "text" in json_data:
-                                    full_response += json_data["text"]
-                                    yield json_data["text"] if raw else dict(text=json_data["text"])
-                        except json.JSONDecodeError as e:
-                            continue
-
-                self.last_response.update(dict(text=full_response))
-                self.conversation.update_chat_history(
-                    prompt, self.get_message(self.last_response)
-                )
-
-            except requests.exceptions.HTTPError as http_err:
-                raise exceptions.FailedToGenerateResponseError(f"HTTP error occurred: {http_err}")
-            except requests.exceptions.RequestException as err:
-                raise exceptions.FailedToGenerateResponseError(f"An error occurred: {err}")
-
-        def for_non_stream():
-            for _ in for_stream():
-                pass
-            return self.last_response
-
-        return for_stream() if stream else for_non_stream()
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> str:
-        """Generate response"""
-
-        def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
-            ):
-                yield self.get_message(response)
-
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
-            )
-
-        return for_stream() if stream else for_non_stream()
-
-    def get_message(self, response: dict) -> str:
-        """Retrieves message from response with validation"""
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
-
-if __name__ == "__main__":
-    from rich import print
-    ai = LlamaTutor()
-    response = ai.chat("Write a poem about AI", stream=True)
-    for chunk in response:
-        print(chunk, end="", flush=True)