webscout-8.2.3-py3-none-any.whl → webscout-8.2.5-py3-none-any.whl
This diff compares the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release: this version of webscout has been flagged as possibly problematic.
- webscout/AIutel.py +226 -14
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/Extra/gguf.py +2 -0
- webscout/Extra/tempmail/base.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +4 -0
- webscout/Provider/AISEARCH/scira_search.py +2 -5
- webscout/Provider/Aitopia.py +75 -51
- webscout/Provider/AllenAI.py +181 -147
- webscout/Provider/ChatGPTClone.py +97 -86
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +79 -32
- webscout/Provider/Deepinfra.py +135 -94
- webscout/Provider/ElectronHub.py +103 -39
- webscout/Provider/ExaChat.py +36 -20
- webscout/Provider/GPTWeb.py +103 -47
- webscout/Provider/GithubChat.py +52 -49
- webscout/Provider/GizAI.py +283 -0
- webscout/Provider/Glider.py +39 -28
- webscout/Provider/Groq.py +222 -91
- webscout/Provider/HeckAI.py +93 -69
- webscout/Provider/HuggingFaceChat.py +113 -106
- webscout/Provider/Hunyuan.py +94 -83
- webscout/Provider/Jadve.py +104 -79
- webscout/Provider/LambdaChat.py +142 -123
- webscout/Provider/Llama3.py +94 -39
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +95 -37
- webscout/Provider/Netwrck.py +94 -52
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +286 -0
- webscout/Provider/OPENAI/chatgptclone.py +35 -14
- webscout/Provider/OPENAI/deepinfra.py +37 -0
- webscout/Provider/OPENAI/exachat.py +4 -0
- webscout/Provider/OPENAI/groq.py +354 -0
- webscout/Provider/OPENAI/heckai.py +6 -2
- webscout/Provider/OPENAI/mcpcore.py +376 -0
- webscout/Provider/OPENAI/multichat.py +368 -0
- webscout/Provider/OPENAI/netwrck.py +3 -1
- webscout/Provider/OPENAI/scirachat.py +2 -4
- webscout/Provider/OPENAI/textpollinations.py +20 -22
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/OpenGPT.py +48 -38
- webscout/Provider/PI.py +178 -93
- webscout/Provider/PizzaGPT.py +66 -36
- webscout/Provider/StandardInput.py +42 -30
- webscout/Provider/TeachAnything.py +95 -52
- webscout/Provider/TextPollinationsAI.py +138 -78
- webscout/Provider/TwoAI.py +162 -81
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/Venice.py +97 -58
- webscout/Provider/VercelAI.py +33 -14
- webscout/Provider/WiseCat.py +65 -28
- webscout/Provider/Writecream.py +37 -11
- webscout/Provider/WritingMate.py +135 -63
- webscout/Provider/__init__.py +9 -27
- webscout/Provider/ai4chat.py +6 -7
- webscout/Provider/asksteve.py +53 -44
- webscout/Provider/cerebras.py +77 -31
- webscout/Provider/chatglm.py +47 -37
- webscout/Provider/copilot.py +0 -3
- webscout/Provider/elmo.py +109 -60
- webscout/Provider/granite.py +102 -54
- webscout/Provider/hermes.py +95 -48
- webscout/Provider/koala.py +1 -1
- webscout/Provider/learnfastai.py +113 -54
- webscout/Provider/llama3mitril.py +86 -51
- webscout/Provider/llmchat.py +88 -46
- webscout/Provider/llmchatco.py +110 -115
- webscout/Provider/meta.py +41 -37
- webscout/Provider/multichat.py +67 -28
- webscout/Provider/scira_chat.py +49 -30
- webscout/Provider/scnet.py +106 -53
- webscout/Provider/searchchat.py +87 -88
- webscout/Provider/sonus.py +113 -63
- webscout/Provider/toolbaz.py +115 -82
- webscout/Provider/turboseek.py +90 -43
- webscout/Provider/tutorai.py +82 -64
- webscout/Provider/typefully.py +85 -35
- webscout/Provider/typegpt.py +118 -61
- webscout/Provider/uncovr.py +132 -76
- webscout/Provider/x0gpt.py +69 -26
- webscout/Provider/yep.py +79 -66
- webscout/cli.py +256 -0
- webscout/conversation.py +34 -22
- webscout/exceptions.py +23 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
- webscout-8.2.5.dist-info/entry_points.txt +3 -0
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/gaurish.py +0 -244
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llamatutor.py +0 -192
- webscout-8.2.3.dist-info/entry_points.txt +0 -5
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/labyrinth.py
DELETED
@@ -1,340 +0,0 @@
-from typing import Union, Any, Dict, Generator
-from uuid import uuid4
-import requests
-import re
-import json
-
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
-from webscout.AIbase import Provider
-from webscout import exceptions
-from webscout.litagent import LitAgent
-
-class LabyrinthAI(Provider):
-    """
-    A class to interact with the Labyrinth AI chat API.
-
-    Attributes:
-        system_prompt (str): The system prompt to define the assistant's role.
-
-    Examples:
-        >>> from webscout.Provider.labyrinth import LabyrinthAI
-        >>> ai = LabyrinthAI()
-        >>> response = ai.chat("What's the weather today?")
-        >>> print(response)
-        'The weather today is sunny with a high of 75°F.'
-    """
-
-    # AVAILABLE_MODELS = [
-    #     "gemini-2.0-flash"
-    # ]
-
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 2049,
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-        system_prompt: str = "You are a helpful assistant.",
-        # model: str = "gemini-2.0-flash",
-        browser: str = "chrome"
-    ):
-        """
-        Initializes the Labyrinth AI API with given parameters.
-
-        Args:
-            is_conversation (bool): Whether the provider is in conversation mode.
-            max_tokens (int): Maximum number of tokens to sample.
-            timeout (int): Timeout for API requests.
-            intro (str): Introduction message for the conversation.
-            filepath (str): Filepath for storing conversation history.
-            update_file (bool): Whether to update the conversation history file.
-            proxies (dict): Proxies for the API requests.
-            history_offset (int): Offset for conversation history.
-            act (str): Act for the conversation.
-            system_prompt (str): The system prompt to define the assistant's role.
-            browser (str): Browser type to emulate in the user agent.
-
-        Examples:
-            >>> ai = LabyrinthAI(system_prompt="You are a friendly assistant.")
-            >>> print(ai.system_prompt)
-            'You are a friendly assistant.'
-        """
-        # if model not in self.AVAILABLE_MODELS:
-        #     raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-        self.url = "https://labyrinth-ebon.vercel.app/api/chat"
-        self.system_prompt = system_prompt
-
-        # Initialize LitAgent for user agent generation
-        self.agent = LitAgent()
-        # Use fingerprinting to create a consistent browser identity
-        self.fingerprint = self.agent.generate_fingerprint(browser)
-
-        # Use the fingerprint for headers
-        self.headers = {
-            "Accept": self.fingerprint["accept"],
-            "Accept-Encoding": "gzip, deflate, br, zstd",
-            "Accept-Language": self.fingerprint["accept_language"],
-            "Content-Type": "application/json",
-            "Origin": "https://labyrinth-ebon.vercel.app",
-            "Cookie": "stock-mode=false; __Host-next-auth.csrf-token=68aa6224f2ff7bbf2c4480a90c49b7b95aaac01a63ed90f3d20a69292c16a366%7C1f6672653c6e304ea971373fecdc3fe491568d014c68cdf3b26ead42f1c6ac62; __Secure-next-auth.callback-url=https%3A%2F%2Flabyrinth-ebon.vercel.app%2F; selectedModel={\"id\":\"gemini-2.0-flash\",\"name\":\"Gemini 2.0 Flash\",\"provider\":\"Google Generative AI\",\"providerId\":\"google\",\"enabled\":true,\"toolCallType\":\"native\",\"searchMode\":true}; __Secure-next-auth.session-token=eyJhbGciOiJkaXIiLCJlbmMiOiJBMjU2R0NNIn0..Z5-1j_rsCWRHY17B.s0lMkhWr0S7a3-4h2p-ce0NJHeNyh8nDyOcsrzFU8AZtBbygGcHKbJ8PzLLQBNL7NwrUwET3fKGbtnAphaVjuSJQfXA0tu69zKJELPw-A3x0Ev6aHJMTG3l9_SweByHyfCSCnGB7tvjwEFsW4c5xs_HzMdPmoRTYyYzlZPuDGhHtQX7WyeUiARc36NfwV-KJYpzXV5-g0VkpsxFEawcfdk6D_S7JtOMmjMTTYuw2BbNYvtlvM-n_XivIctQmQ5Fp65JEE73nr5hWVReyYrkyfUGt4Q.TP8Woa-7Ao05yVCjbbGDug",
-            "Referer": "https://labyrinth-ebon.vercel.app/",
-            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
-            "Sec-CH-UA-Mobile": "?0",
-            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
-            "User-Agent": self.fingerprint["user_agent"],
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
-            "Sec-GPC": "1"
-        }
-
-        self.session = requests.Session()
-        self.session.headers.update(self.headers)
-        self.session.proxies.update(proxies)
-
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.timeout = timeout
-        self.last_response = {}
-        # self.model = model
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-
-    def refresh_identity(self, browser: str = None):
-        """
-        Refreshes the browser identity fingerprint.
-
-        Args:
-            browser: Specific browser to use for the new fingerprint
-        """
-        browser = browser or self.fingerprint.get("browser_type", "chrome")
-        self.fingerprint = self.agent.generate_fingerprint(browser)
-
-        # Update headers with new fingerprint
-        self.headers.update({
-            "Accept": self.fingerprint["accept"],
-            "Accept-Language": self.fingerprint["accept_language"],
-            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
-            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
-            "User-Agent": self.fingerprint["user_agent"],
-        })
-
-        # Update session headers
-        for header, value in self.headers.items():
-            self.session.headers[header] = value
-
-        return self.fingerprint
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> Union[Dict[str, Any], Generator]:
-        """
-        Sends a prompt to the Labyrinth AI API and returns the response.
-
-        Args:
-            prompt (str): The prompt to send to the API.
-            stream (bool): Whether to stream the response.
-            raw (bool): Whether to return the raw response.
-            optimizer (str): Optimizer to use for the prompt.
-            conversationally (bool): Whether to generate the prompt conversationally.
-
-        Returns:
-            Union[Dict[str, Any], Generator]: The API response.
-
-        Examples:
-            >>> ai = LabyrinthAI()
-            >>> response = ai.ask("Tell me a joke!")
-            >>> print(response)
-            {'text': 'Why did the scarecrow win an award? Because he was outstanding in his field!'}
-        """
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
-        # Prepare the request payload
-        payload = {
-            "id": str(uuid4()),
-            "messages": [
-                {
-                    "role": "system",
-                    "content": self.system_prompt
-                },
-                {
-                    "role": "user",
-                    "content": conversation_prompt,
-                    "parts": [{"type": "text", "text": conversation_prompt}]
-                }
-            ],
-            "stockMode": False
-        }
-
-        def for_stream():
-            try:
-                with self.session.post(self.url, json=payload, stream=True, timeout=self.timeout) as response:
-                    if response.status_code != 200:
-                        # If we get a non-200 response, try refreshing our identity once
-                        if response.status_code in [403, 429]:
-                            self.refresh_identity()
-                            # Retry with new identity
-                            with self.session.post(self.url, json=payload, stream=True, timeout=self.timeout) as retry_response:
-                                if not retry_response.ok:
-                                    raise exceptions.FailedToGenerateResponseError(
-                                        f"Failed to generate response after identity refresh - ({retry_response.status_code}, {retry_response.reason}) - {retry_response.text}"
-                                    )
-                                response = retry_response
-                        else:
-                            raise exceptions.FailedToGenerateResponseError(
-                                f"Request failed with status code {response.status_code}"
-                            )
-
-                    streaming_text = ""
-                    for line in response.iter_lines():
-                        if line:
-                            try:
-                                line = line.decode('utf-8')
-                                match = re.search(r'0:"(.*?)"', line)
-                                if match:
-                                    content = match.group(1)
-                                    streaming_text += content
-                                    resp = dict(text=content)
-                                    yield resp if raw else resp
-                            except UnicodeDecodeError:
-                                continue
-
-                    self.last_response = {"text": streaming_text}
-                    self.conversation.update_chat_history(prompt, streaming_text)
-
-            except requests.RequestException as e:
-                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
-
-        def for_non_stream():
-            try:
-                response = self.session.post(self.url, json=payload, timeout=self.timeout)
-                if response.status_code != 200:
-                    if response.status_code in [403, 429]:
-                        self.refresh_identity()
-                        response = self.session.post(self.url, json=payload, timeout=self.timeout)
-                        if not response.ok:
-                            raise exceptions.FailedToGenerateResponseError(
-                                f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {response.text}"
-                            )
-                    else:
-                        raise exceptions.FailedToGenerateResponseError(
-                            f"Request failed with status code {response.status_code}"
-                        )
-
-                full_response = ""
-                for line in response.iter_lines():
-                    if line:
-                        try:
-                            line = line.decode('utf-8')
-                            match = re.search(r'0:"(.*?)"', line)
-                            if match:
-                                content = match.group(1)
-                                full_response += content
-                        except UnicodeDecodeError:
-                            continue
-
-                self.last_response = {"text": full_response}
-                self.conversation.update_chat_history(prompt, full_response)
-                return {"text": full_response}
-            except Exception as e:
-                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
-
-        return for_stream() if stream else for_non_stream()
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> Union[str, Generator[str, None, None]]:
-        """
-        Generates a response from the Labyrinth AI API.
-
-        Args:
-            prompt (str): The prompt to send to the API.
-            stream (bool): Whether to stream the response.
-            optimizer (str): Optimizer to use for the prompt.
-            conversationally (bool): Whether to generate the prompt conversationally.
-
-        Returns:
-            Union[str, Generator[str, None, None]]: The API response.
-
-        Examples:
-            >>> ai = LabyrinthAI()
-            >>> response = ai.chat("What's the weather today?")
-            >>> print(response)
-            'The weather today is sunny with a high of 75°F.'
-        """
-        def for_stream():
-            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
-                yield self.get_message(response)
-        def for_non_stream():
-            return self.get_message(
-                self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
-            )
-        return for_stream() if stream else for_non_stream()
-
-    def get_message(self, response: dict) -> str:
-        """
-        Extracts the message from the API response.
-
-        Args:
-            response (dict): The API response.
-
-        Returns:
-            str: The message content.
-
-        Examples:
-            >>> ai = LabyrinthAI()
-            >>> response = ai.ask("Tell me a joke!")
-            >>> message = ai.get_message(response)
-            >>> print(message)
-            'Why did the scarecrow win an award? Because he was outstanding in his field!'
-        """
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        formatted_text = response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
-        return formatted_text
-
-if __name__ == "__main__":
-    from rich import print
-    ai = LabyrinthAI()
-    resp = ai.chat("What is the capital of France?", stream=True)
-    for message in resp:
-        print(message, end='', flush=True)
webscout/Provider/lepton.py
DELETED
@@ -1,194 +0,0 @@
-import requests
-import re
-import json
-
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
-from webscout.AIbase import Provider
-from webscout.litagent import LitAgent as Lit
-class Lepton(Provider):
-    """
-    A class to interact with the Lepton.run API.
-    """
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 600,
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-    ) -> None:
-        """Instantiates Lepton
-
-        Args:
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-        """
-        self.session = requests.Session()
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.api_endpoint = "https://search.lepton.run/api/query"
-        self.stream_chunk_size = 64
-        self.timeout = timeout
-        self.last_response = {}
-        self.headers = {
-            "accept": "*/*",
-            "accept-encoding": "gzip, deflate, br, zstd",
-            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
-            "content-type": "text/plain;charset=UTF-8",
-            "dnt": "1",
-            "origin": "https://search.lepton.run",
-            "priority": "u=1, i",
-            "referer": "https://search.lepton.run/search?q=BYSyA&rid=aqZSHQomzwBBF3fyHnrND",
-            "sec-ch-ua": '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Windows"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin",
-            "user-agent": Lit().random(),
-        }
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        self.session.headers.update(self.headers)
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-        self.session.proxies = proxies
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> dict:
-        """Chat with AI
-
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict : {}
-        ```json
-        {
-            "text" : "How may I assist you today?"
-        }
-        ```
-        """
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
-
-        self.session.headers.update(self.headers)
-        payload = json.dumps({"query": conversation_prompt})
-
-        def for_non_stream():
-            response = self.session.post(
-                self.api_endpoint, data=payload, headers=self.headers, timeout=self.timeout
-            )
-            if not response.ok:
-                raise Exception(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
-
-            response_text = response.text
-            start_marker = "__LLM_RESPONSE__"
-            end_marker = "__RELATED_QUESTIONS__"
-
-            start_index = response_text.find(start_marker) + len(start_marker)
-            end_index = response_text.find(end_marker)
-
-            if start_index != -1 and end_index != -1:
-                extracted_text = response_text[start_index:end_index].strip()
-
-                # Remove citations using regular expression
-                cleaned_text = re.sub(r'\[citation:\d+\]', '', extracted_text)
-
-                self.last_response.update(dict(text=cleaned_text))
-
-                self.conversation.update_chat_history(
-                    prompt, self.get_message(self.last_response)
-                )
-            return self.last_response
-
-        return for_non_stream()
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> str:
-        """Generate response `str`
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            str: Response generated
-        """
-
-        return self.get_message(
-            self.ask(
-                prompt,
-                optimizer=optimizer,
-                conversationally=conversationally,
-            )
-        )
-
-    def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
-
-if __name__ == '__main__':
-    from rich import print
-    ai = Lepton()
-    response = ai.chat("hi")
-    for chunk in response:
-        print(chunk, end="", flush=True)
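Likewise, a minimal sketch of the removed Lepton provider under 8.2.3, again reconstructed from the deleted source itself; Lepton.ask() never implemented streaming, so chat() returns the full answer as a single string:

# Sketch for webscout==8.2.3 only; webscout.Provider.lepton was deleted in 8.2.5.
from webscout.Provider.lepton import Lepton

ai = Lepton()
# chat() returns one complete string; the provider itself strips the
# __LLM_RESPONSE__/__RELATED_QUESTIONS__ markers and [citation:N] tags.
print(ai.chat("hi"))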