webscout-8.2.3-py3-none-any.whl → webscout-8.2.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- inferno/lol.py +589 -0
- webscout/AIutel.py +226 -14
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/Extra/tempmail/base.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +4 -0
- webscout/Provider/AllenAI.py +163 -126
- webscout/Provider/ChatGPTClone.py +96 -84
- webscout/Provider/Deepinfra.py +95 -67
- webscout/Provider/ElectronHub.py +55 -0
- webscout/Provider/GPTWeb.py +96 -46
- webscout/Provider/Groq.py +194 -91
- webscout/Provider/HeckAI.py +89 -47
- webscout/Provider/HuggingFaceChat.py +113 -106
- webscout/Provider/Hunyuan.py +94 -83
- webscout/Provider/Jadve.py +107 -75
- webscout/Provider/LambdaChat.py +106 -64
- webscout/Provider/Llama3.py +94 -39
- webscout/Provider/MCPCore.py +318 -0
- webscout/Provider/Marcus.py +85 -36
- webscout/Provider/Netwrck.py +76 -43
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +286 -0
- webscout/Provider/OPENAI/chatgptclone.py +35 -14
- webscout/Provider/OPENAI/deepinfra.py +37 -0
- webscout/Provider/OPENAI/groq.py +354 -0
- webscout/Provider/OPENAI/heckai.py +6 -2
- webscout/Provider/OPENAI/mcpcore.py +376 -0
- webscout/Provider/OPENAI/multichat.py +368 -0
- webscout/Provider/OPENAI/netwrck.py +3 -1
- webscout/Provider/OpenGPT.py +48 -38
- webscout/Provider/PI.py +168 -92
- webscout/Provider/PizzaGPT.py +66 -36
- webscout/Provider/TeachAnything.py +85 -51
- webscout/Provider/TextPollinationsAI.py +109 -51
- webscout/Provider/TwoAI.py +109 -60
- webscout/Provider/Venice.py +93 -56
- webscout/Provider/VercelAI.py +2 -2
- webscout/Provider/WiseCat.py +65 -28
- webscout/Provider/Writecream.py +37 -11
- webscout/Provider/WritingMate.py +135 -63
- webscout/Provider/__init__.py +3 -21
- webscout/Provider/ai4chat.py +6 -7
- webscout/Provider/copilot.py +0 -3
- webscout/Provider/elmo.py +101 -58
- webscout/Provider/granite.py +91 -46
- webscout/Provider/hermes.py +87 -47
- webscout/Provider/koala.py +1 -1
- webscout/Provider/learnfastai.py +104 -50
- webscout/Provider/llama3mitril.py +86 -51
- webscout/Provider/llmchat.py +88 -46
- webscout/Provider/llmchatco.py +74 -49
- webscout/Provider/meta.py +41 -37
- webscout/Provider/multichat.py +54 -25
- webscout/Provider/scnet.py +93 -43
- webscout/Provider/searchchat.py +82 -75
- webscout/Provider/sonus.py +103 -51
- webscout/Provider/toolbaz.py +132 -77
- webscout/Provider/turboseek.py +92 -41
- webscout/Provider/tutorai.py +82 -64
- webscout/Provider/typefully.py +75 -33
- webscout/Provider/typegpt.py +96 -35
- webscout/Provider/uncovr.py +112 -62
- webscout/Provider/x0gpt.py +69 -26
- webscout/Provider/yep.py +79 -66
- webscout/conversation.py +35 -21
- webscout/exceptions.py +20 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/METADATA +22 -10
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/RECORD +78 -81
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/WHEEL +1 -1
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llamatutor.py +0 -192
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info/licenses}/LICENSE.md +0 -0
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/top_level.txt +0 -0
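The per-file summary above can be reproduced locally without trusting the registry page. Below is a minimal sketch, assuming only `pip` and the Python standard library; the helper names are ours, not part of webscout:

import difflib
import subprocess
import zipfile
from pathlib import Path

def fetch_wheel(spec: str, dest: Path) -> Path:
    """Download the wheel for `spec` (e.g. 'webscout==8.2.3') into `dest`."""
    subprocess.run(
        ["pip", "download", spec, "--no-deps", "--only-binary=:all:", "-d", str(dest)],
        check=True,
    )
    return next(dest.glob("*.whl"))

def line_counts(old_whl: Path, new_whl: Path) -> None:
    """Print added/removed line counts per file, like the summary above."""
    with zipfile.ZipFile(old_whl) as old, zipfile.ZipFile(new_whl) as new:
        old_names, new_names = set(old.namelist()), set(new.namelist())
        for name in sorted(old_names | new_names):
            before = old.read(name).decode("utf-8", "replace").splitlines() if name in old_names else []
            after = new.read(name).decode("utf-8", "replace").splitlines() if name in new_names else []
            diff = list(difflib.unified_diff(before, after, lineterm=""))
            added = sum(1 for l in diff if l.startswith("+") and not l.startswith("+++"))
            removed = sum(1 for l in diff if l.startswith("-") and not l.startswith("---"))
            if added or removed:
                print(f"{name} +{added} -{removed}")

old = fetch_wheel("webscout==8.2.3", Path("old"))
new = fetch_wheel("webscout==8.2.4", Path("new"))
line_counts(old, new)

Note that because the `.dist-info` directory name embeds the version, a naive diff like this reports its files as a remove-plus-add pair; the `{old → new}` brace notation above is the registry page's normalization of that rename.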
webscout/Provider/WebSim.py
DELETED
@@ -1,228 +0,0 @@
-import requests
-import json
-import string
-import random
-from typing import Any, Dict, Union
-
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
-from webscout.AIbase import Provider
-from webscout import exceptions
-from webscout.litagent import LitAgent
-
-class WebSim(Provider):
-    """
-    A class to interact with the WebSim API.
-    """
-
-    url = "https://websim.ai"
-    chat_api_endpoint = "https://websim.ai/api/v1/inference/run_chat_completion"
-    image_api_endpoint = "https://websim.ai/api/v1/inference/run_image_generation"
-
-    image_models = ['flux']
-    AVAILABLE_MODELS = ['gemini-1.5-flash', 'gemini-1.5-pro', 'gemini-flash', 'gemini-pro', 'gemini-flash-thinking'] + image_models
-
-    @staticmethod
-    def generate_project_id(for_image=False):
-        """
-        Generate a project ID in the appropriate format
-
-        For chat: format like 'ke3_xh5gai3gjkmruomu'
-        For image: format like 'kx0m131_rzz66qb2xoy7'
-        """
-        chars = string.ascii_lowercase + string.digits
-
-        if for_image:
-            first_part = ''.join(random.choices(chars, k=7))
-            second_part = ''.join(random.choices(chars, k=12))
-            return f"{first_part}_{second_part}"
-        else:
-            prefix = ''.join(random.choices(chars, k=3))
-            suffix = ''.join(random.choices(chars, k=15))
-            return f"{prefix}_{suffix}"
-
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 2049,
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-        model: str = 'gemini-1.5-pro',
-        aspect_ratio: str = "1:1"
-    ):
-        """Initializes the WebSim API client."""
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-        self.agent = LitAgent()
-        self.headers = {
-            'accept': '*/*',
-            'accept-language': 'en-US,en;q=0.9',
-            'content-type': 'text/plain;charset=UTF-8',
-            'origin': 'https://websim.ai',
-            'user-agent': self.agent.random(),
-            'websim-flags;': ''
-        }
-
-        self.session = requests.Session()
-        self.session.headers.update(self.headers)
-        self.session.proxies.update(proxies)
-
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.timeout = timeout
-        self.last_response = {}
-        self.model = model
-        self.aspect_ratio = aspect_ratio
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> Dict[str, Any]:
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
-        is_image_request = self.model in self.image_models
-        project_id = self.generate_project_id(for_image=is_image_request)
-
-        if is_image_request:
-            self.headers['referer'] = 'https://websim.ai/@ISWEARIAMNOTADDICTEDTOPILLOW/ai-image-prompt-generator'
-            return self._handle_image_request(project_id, conversation_prompt)
-        else:
-            self.headers['referer'] = 'https://websim.ai/@ISWEARIAMNOTADDICTEDTOPILLOW/zelos-ai-assistant'
-            return self._handle_chat_request(project_id, conversation_prompt)
-
-    def _handle_image_request(self, project_id: str, prompt: str) -> Dict[str, Any]:
-        try:
-            data = {
-                "project_id": project_id,
-                "prompt": prompt,
-                "aspect_ratio": self.aspect_ratio
-            }
-            response = self.session.post(
-                self.image_api_endpoint,
-                json=data,
-                timeout=self.timeout
-            )
-            response.raise_for_status()
-            response_json = response.json()
-            image_url = response_json.get("url")
-            if image_url:
-                self.last_response = {"text": image_url}
-                self.conversation.update_chat_history(prompt, image_url)
-                return {"text": image_url}
-            raise exceptions.FailedToGenerateResponseError("No image URL found in response")
-        except requests.RequestException as e:
-            raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
-
-    def _handle_chat_request(self, project_id: str, prompt: str) -> Dict[str, Any]:
-        max_retries = 3
-        retry_count = 0
-        last_error = None
-
-        while retry_count < max_retries:
-            try:
-                data = {
-                    "project_id": project_id,
-                    "messages": [{"role": "user", "content": prompt}]
-                }
-                response = self.session.post(
-                    self.chat_api_endpoint,
-                    json=data,
-                    timeout=self.timeout
-                )
-
-                if response.status_code == 429:
-                    last_error = exceptions.FailedToGenerateResponseError(
-                        f"Rate limit exceeded: {response.text}"
-                    )
-                    retry_count += 1
-                    if retry_count < max_retries:
-                        continue
-                    raise last_error
-
-                response.raise_for_status()
-                response_json = response.json()
-                content = response_json.get("content", "")
-
-                self.last_response = {"text": content}
-                self.conversation.update_chat_history(prompt, content)
-                return {"text": content.strip()}
-
-            except requests.RequestException as e:
-                if "Rate limit exceeded" in str(e) and retry_count < max_retries:
-                    retry_count += 1
-                else:
-                    raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
-
-        raise last_error or exceptions.FailedToGenerateResponseError("Max retries exceeded")
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> str:
-        return self.get_message(
-            self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
-        )
-
-    def get_message(self, response: dict) -> str:
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
-
-if __name__ == "__main__":
-    print("-" * 80)
-    print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" * 80)
-
-    for model in WebSim.AVAILABLE_MODELS:
-        try:
-            test_ai = WebSim(model=model, timeout=60)
-            response = test_ai.chat("Say 'Hello' in one word")
-
-            if response and len(response.strip()) > 0:
-                status = "✓"
-                # Clean and truncate response
-                clean_text = response.strip().encode('utf-8', errors='ignore').decode('utf-8')
-                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
-            else:
-                status = "✗"
-                display_text = "Empty or invalid response"
-            print(f"\r{model:<50} {status:<10} {display_text}")
-        except Exception as e:
-            print(f"\r{model:<50} {'✗':<10} {str(e)}")
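WebSim is one of nine provider modules deleted in this release (see the file list above). Any downstream code doing `from webscout.Provider.WebSim import WebSim` will raise ImportError after upgrading. A minimal defensive sketch; the fallback behavior is our assumption, not a documented migration path:

try:
    from webscout.Provider.WebSim import WebSim  # present through webscout 8.2.3
except ImportError:
    # Removed in 8.2.4 along with C4ai, ChatGPTES, DeepSeek, Llama, Phind,
    # labyrinth, lepton, and llamatutor; pick a surviving provider instead.
    WebSim = None

if WebSim is None:
    raise SystemExit("WebSim provider was removed in webscout 8.2.4; migrate to another provider.")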
webscout/Provider/labyrinth.py
DELETED
@@ -1,340 +0,0 @@
-from typing import Union, Any, Dict, Generator
-from uuid import uuid4
-import requests
-import re
-import json
-
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
-from webscout.AIbase import Provider
-from webscout import exceptions
-from webscout.litagent import LitAgent
-
-class LabyrinthAI(Provider):
-    """
-    A class to interact with the Labyrinth AI chat API.
-
-    Attributes:
-        system_prompt (str): The system prompt to define the assistant's role.
-
-    Examples:
-        >>> from webscout.Provider.labyrinth import LabyrinthAI
-        >>> ai = LabyrinthAI()
-        >>> response = ai.chat("What's the weather today?")
-        >>> print(response)
-        'The weather today is sunny with a high of 75°F.'
-    """
-
-    # AVAILABLE_MODELS = [
-    #     "gemini-2.0-flash"
-    # ]
-
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 2049,
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-        system_prompt: str = "You are a helpful assistant.",
-        # model: str = "gemini-2.0-flash",
-        browser: str = "chrome"
-    ):
-        """
-        Initializes the Labyrinth AI API with given parameters.
-
-        Args:
-            is_conversation (bool): Whether the provider is in conversation mode.
-            max_tokens (int): Maximum number of tokens to sample.
-            timeout (int): Timeout for API requests.
-            intro (str): Introduction message for the conversation.
-            filepath (str): Filepath for storing conversation history.
-            update_file (bool): Whether to update the conversation history file.
-            proxies (dict): Proxies for the API requests.
-            history_offset (int): Offset for conversation history.
-            act (str): Act for the conversation.
-            system_prompt (str): The system prompt to define the assistant's role.
-            browser (str): Browser type to emulate in the user agent.
-
-        Examples:
-            >>> ai = LabyrinthAI(system_prompt="You are a friendly assistant.")
-            >>> print(ai.system_prompt)
-            'You are a friendly assistant.'
-        """
-        # if model not in self.AVAILABLE_MODELS:
-        #     raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-        self.url = "https://labyrinth-ebon.vercel.app/api/chat"
-        self.system_prompt = system_prompt
-
-        # Initialize LitAgent for user agent generation
-        self.agent = LitAgent()
-        # Use fingerprinting to create a consistent browser identity
-        self.fingerprint = self.agent.generate_fingerprint(browser)
-
-        # Use the fingerprint for headers
-        self.headers = {
-            "Accept": self.fingerprint["accept"],
-            "Accept-Encoding": "gzip, deflate, br, zstd",
-            "Accept-Language": self.fingerprint["accept_language"],
-            "Content-Type": "application/json",
-            "Origin": "https://labyrinth-ebon.vercel.app",
-            "Cookie": "stock-mode=false; __Host-next-auth.csrf-token=68aa6224f2ff7bbf2c4480a90c49b7b95aaac01a63ed90f3d20a69292c16a366%7C1f6672653c6e304ea971373fecdc3fe491568d014c68cdf3b26ead42f1c6ac62; __Secure-next-auth.callback-url=https%3A%2F%2Flabyrinth-ebon.vercel.app%2F; selectedModel={\"id\":\"gemini-2.0-flash\",\"name\":\"Gemini 2.0 Flash\",\"provider\":\"Google Generative AI\",\"providerId\":\"google\",\"enabled\":true,\"toolCallType\":\"native\",\"searchMode\":true}; __Secure-next-auth.session-token=eyJhbGciOiJkaXIiLCJlbmMiOiJBMjU2R0NNIn0..Z5-1j_rsCWRHY17B.s0lMkhWr0S7a3-4h2p-ce0NJHeNyh8nDyOcsrzFU8AZtBbygGcHKbJ8PzLLQBNL7NwrUwET3fKGbtnAphaVjuSJQfXA0tu69zKJELPw-A3x0Ev6aHJMTG3l9_SweByHyfCSCnGB7tvjwEFsW4c5xs_HzMdPmoRTYyYzlZPuDGhHtQX7WyeUiARc36NfwV-KJYpzXV5-g0VkpsxFEawcfdk6D_S7JtOMmjMTTYuw2BbNYvtlvM-n_XivIctQmQ5Fp65JEE73nr5hWVReyYrkyfUGt4Q.TP8Woa-7Ao05yVCjbbGDug",
-            "Referer": "https://labyrinth-ebon.vercel.app/",
-            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
-            "Sec-CH-UA-Mobile": "?0",
-            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
-            "User-Agent": self.fingerprint["user_agent"],
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
-            "Sec-GPC": "1"
-        }
-
-        self.session = requests.Session()
-        self.session.headers.update(self.headers)
-        self.session.proxies.update(proxies)
-
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.timeout = timeout
-        self.last_response = {}
-        # self.model = model
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-
-    def refresh_identity(self, browser: str = None):
-        """
-        Refreshes the browser identity fingerprint.
-
-        Args:
-            browser: Specific browser to use for the new fingerprint
-        """
-        browser = browser or self.fingerprint.get("browser_type", "chrome")
-        self.fingerprint = self.agent.generate_fingerprint(browser)
-
-        # Update headers with new fingerprint
-        self.headers.update({
-            "Accept": self.fingerprint["accept"],
-            "Accept-Language": self.fingerprint["accept_language"],
-            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
-            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
-            "User-Agent": self.fingerprint["user_agent"],
-        })
-
-        # Update session headers
-        for header, value in self.headers.items():
-            self.session.headers[header] = value
-
-        return self.fingerprint
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> Union[Dict[str, Any], Generator]:
-        """
-        Sends a prompt to the Labyrinth AI API and returns the response.
-
-        Args:
-            prompt (str): The prompt to send to the API.
-            stream (bool): Whether to stream the response.
-            raw (bool): Whether to return the raw response.
-            optimizer (str): Optimizer to use for the prompt.
-            conversationally (bool): Whether to generate the prompt conversationally.
-
-        Returns:
-            Union[Dict[str, Any], Generator]: The API response.
-
-        Examples:
-            >>> ai = LabyrinthAI()
-            >>> response = ai.ask("Tell me a joke!")
-            >>> print(response)
-            {'text': 'Why did the scarecrow win an award? Because he was outstanding in his field!'}
-        """
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
-        # Prepare the request payload
-        payload = {
-            "id": str(uuid4()),
-            "messages": [
-                {
-                    "role": "system",
-                    "content": self.system_prompt
-                },
-                {
-                    "role": "user",
-                    "content": conversation_prompt,
-                    "parts": [{"type": "text", "text": conversation_prompt}]
-                }
-            ],
-            "stockMode": False
-        }
-
-        def for_stream():
-            try:
-                with self.session.post(self.url, json=payload, stream=True, timeout=self.timeout) as response:
-                    if response.status_code != 200:
-                        # If we get a non-200 response, try refreshing our identity once
-                        if response.status_code in [403, 429]:
-                            self.refresh_identity()
-                            # Retry with new identity
-                            with self.session.post(self.url, json=payload, stream=True, timeout=self.timeout) as retry_response:
-                                if not retry_response.ok:
-                                    raise exceptions.FailedToGenerateResponseError(
-                                        f"Failed to generate response after identity refresh - ({retry_response.status_code}, {retry_response.reason}) - {retry_response.text}"
-                                    )
-                                response = retry_response
-                        else:
-                            raise exceptions.FailedToGenerateResponseError(
-                                f"Request failed with status code {response.status_code}"
-                            )
-
-                    streaming_text = ""
-                    for line in response.iter_lines():
-                        if line:
-                            try:
-                                line = line.decode('utf-8')
-                                match = re.search(r'0:"(.*?)"', line)
-                                if match:
-                                    content = match.group(1)
-                                    streaming_text += content
-                                    resp = dict(text=content)
-                                    yield resp if raw else resp
-                            except UnicodeDecodeError:
-                                continue
-
-                    self.last_response = {"text": streaming_text}
-                    self.conversation.update_chat_history(prompt, streaming_text)
-
-            except requests.RequestException as e:
-                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
-
-        def for_non_stream():
-            try:
-                response = self.session.post(self.url, json=payload, timeout=self.timeout)
-                if response.status_code != 200:
-                    if response.status_code in [403, 429]:
-                        self.refresh_identity()
-                        response = self.session.post(self.url, json=payload, timeout=self.timeout)
-                        if not response.ok:
-                            raise exceptions.FailedToGenerateResponseError(
-                                f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {response.text}"
-                            )
-                    else:
-                        raise exceptions.FailedToGenerateResponseError(
-                            f"Request failed with status code {response.status_code}"
-                        )
-
-                full_response = ""
-                for line in response.iter_lines():
-                    if line:
-                        try:
-                            line = line.decode('utf-8')
-                            match = re.search(r'0:"(.*?)"', line)
-                            if match:
-                                content = match.group(1)
-                                full_response += content
-                        except UnicodeDecodeError:
-                            continue
-
-                self.last_response = {"text": full_response}
-                self.conversation.update_chat_history(prompt, full_response)
-                return {"text": full_response}
-            except Exception as e:
-                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
-
-        return for_stream() if stream else for_non_stream()
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> Union[str, Generator[str, None, None]]:
-        """
-        Generates a response from the Labyrinth AI API.
-
-        Args:
-            prompt (str): The prompt to send to the API.
-            stream (bool): Whether to stream the response.
-            optimizer (str): Optimizer to use for the prompt.
-            conversationally (bool): Whether to generate the prompt conversationally.
-
-        Returns:
-            Union[str, Generator[str, None, None]]: The API response.
-
-        Examples:
-            >>> ai = LabyrinthAI()
-            >>> response = ai.chat("What's the weather today?")
-            >>> print(response)
-            'The weather today is sunny with a high of 75°F.'
-        """
-        def for_stream():
-            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
-                yield self.get_message(response)
-        def for_non_stream():
-            return self.get_message(
-                self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
-            )
-        return for_stream() if stream else for_non_stream()
-
-    def get_message(self, response: dict) -> str:
-        """
-        Extracts the message from the API response.
-
-        Args:
-            response (dict): The API response.
-
-        Returns:
-            str: The message content.
-
-        Examples:
-            >>> ai = LabyrinthAI()
-            >>> response = ai.ask("Tell me a joke!")
-            >>> message = ai.get_message(response)
-            >>> print(message)
-            'Why did the scarecrow win an award? Because he was outstanding in his field!'
-        """
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        formatted_text = response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
-        return formatted_text
-
-if __name__ == "__main__":
-    from rich import print
-    ai = LabyrinthAI()
-    resp = ai.chat("What is the capital of France?", stream=True)
-    for message in resp:
-        print(message, end='', flush=True)
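One detail of the deleted file worth noting: both its streaming and non-streaming paths recover tokens by regex-matching lines of the form 0:"...", which matches the Vercel AI SDK's data-stream text-part framing (consistent with the provider's vercel.app backend). A standalone sketch of that extraction, reusing the exact pattern from the deleted code; the sample frames here are invented for illustration:

import re
from typing import Iterable, Iterator

# Same pattern the deleted provider used: text parts arrive as 0:"..." lines.
TEXT_PART = re.compile(r'0:"(.*?)"')

def extract_tokens(lines: Iterable[bytes]) -> Iterator[str]:
    """Yield text chunks from a Vercel-AI-style data stream."""
    for raw in lines:
        line = raw.decode('utf-8', errors='ignore')
        match = TEXT_PART.search(line)
        if match:
            # Chunks still carry JSON string escapes (\n and friends);
            # the deleted get_message() unescaped them after the fact.
            yield match.group(1)

# Invented sample frames for illustration only:
sample = [b'0:"Paris"', b'0:" is the capital of France."', b'e:{"finishReason":"stop"}']
print(''.join(extract_tokens(sample)))  # -> Paris is the capital of France.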