webscout: 6.0-py3-none-any.whl → 6.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout has been flagged as potentially problematic.
- webscout/AIauto.py +77 -259
- webscout/Agents/Onlinesearcher.py +22 -10
- webscout/Agents/functioncall.py +2 -2
- webscout/Bard.py +21 -21
- webscout/Extra/autollama.py +37 -20
- webscout/Local/__init__.py +6 -7
- webscout/Local/formats.py +406 -194
- webscout/Local/model.py +1074 -477
- webscout/Local/samplers.py +108 -144
- webscout/Local/thread.py +251 -410
- webscout/Local/ui.py +401 -0
- webscout/Local/utils.py +338 -136
- webscout/Provider/Amigo.py +51 -38
- webscout/Provider/Deepseek.py +7 -6
- webscout/Provider/EDITEE.py +2 -2
- webscout/Provider/GPTWeb.py +1 -1
- webscout/Provider/Llama3.py +1 -1
- webscout/Provider/NinjaChat.py +200 -0
- webscout/Provider/OLLAMA.py +1 -1
- webscout/Provider/Perplexity.py +1 -1
- webscout/Provider/Reka.py +12 -5
- webscout/Provider/TTI/AIuncensored.py +103 -0
- webscout/Provider/TTI/Nexra.py +3 -3
- webscout/Provider/TTI/__init__.py +4 -2
- webscout/Provider/TTI/aiforce.py +2 -2
- webscout/Provider/TTI/imgninza.py +136 -0
- webscout/Provider/TTI/talkai.py +116 -0
- webscout/Provider/TeachAnything.py +0 -3
- webscout/Provider/Youchat.py +1 -1
- webscout/Provider/__init__.py +16 -12
- webscout/Provider/{ChatHub.py → aimathgpt.py} +72 -88
- webscout/Provider/cerebras.py +143 -123
- webscout/Provider/cleeai.py +1 -1
- webscout/Provider/felo_search.py +1 -1
- webscout/Provider/gaurish.py +207 -0
- webscout/Provider/geminiprorealtime.py +160 -0
- webscout/Provider/genspark.py +1 -1
- webscout/Provider/julius.py +8 -3
- webscout/Provider/learnfastai.py +1 -1
- webscout/Provider/{aigames.py → llmchat.py} +74 -84
- webscout/Provider/promptrefine.py +3 -1
- webscout/Provider/talkai.py +196 -0
- webscout/Provider/turboseek.py +3 -8
- webscout/Provider/tutorai.py +1 -1
- webscout/__init__.py +2 -43
- webscout/exceptions.py +5 -1
- webscout/tempid.py +4 -73
- webscout/utils.py +3 -0
- webscout/version.py +1 -1
- webscout/webai.py +1 -1
- webscout/webscout_search.py +154 -123
- {webscout-6.0.dist-info → webscout-6.2.dist-info}/METADATA +164 -245
- {webscout-6.0.dist-info → webscout-6.2.dist-info}/RECORD +57 -55
- webscout/Local/rawdog.py +0 -946
- webscout/Provider/BasedGPT.py +0 -214
- webscout/Provider/TTI/amigo.py +0 -148
- webscout/Provider/bixin.py +0 -264
- webscout/Provider/xdash.py +0 -182
- webscout/websx_search.py +0 -19
- {webscout-6.0.dist-info → webscout-6.2.dist-info}/LICENSE.md +0 -0
- {webscout-6.0.dist-info → webscout-6.2.dist-info}/WHEEL +0 -0
- {webscout-6.0.dist-info → webscout-6.2.dist-info}/entry_points.txt +0 -0
- {webscout-6.0.dist-info → webscout-6.2.dist-info}/top_level.txt +0 -0
webscout/Provider/cerebras.py
CHANGED
@@ -1,22 +1,24 @@
-import
+import re
 import requests
+import json
+import os
+from typing import Any, Dict, Optional, Generator, List, Union
 from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from
+from fake_useragent import UserAgent
+from cerebras.cloud.sdk import Cerebras as CerebrasSDK
+
 
 class Cerebras(Provider):
     """
-    A class to interact with the Cerebras
+    A class to interact with the Cerebras API using a cookie for authentication.
     """
 
-    AVAILABLE_MODELS = ["llama3.1-8b", "llama3.1-70b"]
-
     def __init__(
         self,
-        api_key: str,
         is_conversation: bool = True,
-        max_tokens: int =
+        max_tokens: int = 2049,
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -24,47 +26,36 @@ class Cerebras(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
+        cookie_path: str = "cookie.json",
         model: str = "llama3.1-8b",
-        system_prompt: str = "
+        system_prompt: str = "You are a helpful assistant.",
     ):
-
-        Initializes the Cerebras AI API with given parameters.
-        """
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {model}. Available models are: {', '.join(self.AVAILABLE_MODELS)}")
-
-        self.session = requests.Session()
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.api_endpoint = "https://api.cerebras.ai/v1/chat/completions"
+        # Initialize basic settings first
         self.timeout = timeout
-        self.last_response = {}
         self.model = model
         self.system_prompt = system_prompt
-        self.
-
-
-            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
-            "authorization": f"Bearer {api_key}",
-            "content-type": "application/json",
-            "dnt": "1",
-            "origin": "https://inference.cerebras.ai",
-            "referer": "https://inference.cerebras.ai/",
-            "sec-ch-ua": '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Windows"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-site",
-            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0",
-        }
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.last_response = {}
 
+        # Get API key first
+        try:
+            self.api_key = self.get_demo_api_key(cookie_path)
+            # Set environment variable for the SDK
+            os.environ["CEREBRAS_API_KEY"] = self.api_key
+            # Initialize the client with the API key
+            self.client = CerebrasSDK(api_key=self.api_key)
+        except Exception as e:
+            raise exceptions.APIConnectionError(f"Failed to initialize Cerebras client: {e}")
+
+        # Initialize optimizers
         self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
-
+
+        # Initialize conversation settings
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -76,7 +67,59 @@ class Cerebras(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-
+
+    @staticmethod
+    def extract_query(text: str) -> str:
+        """Extracts the first code block from the given text."""
+        pattern = r"```(.*?)```"
+        matches = re.findall(pattern, text, re.DOTALL)
+        return matches[0].strip() if matches else text.strip()
+
+    @staticmethod
+    def refiner(text: str) -> str:
+        """Refines the input text by removing surrounding quotes."""
+        return text.strip('"')
+
+    def get_demo_api_key(self, cookie_path: str) -> str:
+        """Retrieves the demo API key using the provided cookie."""
+        try:
+            with open(cookie_path, "r") as file:
+                cookies = {item["name"]: item["value"] for item in json.load(file)}
+        except FileNotFoundError:
+            raise FileNotFoundError(f"Cookie file not found at path: {cookie_path}")
+        except json.JSONDecodeError:
+            raise json.JSONDecodeError("Invalid JSON format in the cookie file.", "", 0)
+
+        headers = {
+            "Accept": "*/*",
+            "Accept-Language": "en-US,en;q=0.9",
+            "Content-Type": "application/json",
+            "Origin": "https://inference.cerebras.ai",
+            "Referer": "https://inference.cerebras.ai/",
+            "user-agent": UserAgent().random,
+        }
+
+        json_data = {
+            "operationName": "GetMyDemoApiKey",
+            "variables": {},
+            "query": "query GetMyDemoApiKey {\n GetMyDemoApiKey\n}",
+        }
+
+        try:
+            response = requests.post(
+                "https://inference.cerebras.ai/api/graphql",
+                cookies=cookies,
+                headers=headers,
+                json=json_data,
+                timeout=self.timeout,
+            )
+            response.raise_for_status()
+            api_key = response.json()["data"]["GetMyDemoApiKey"]
+            return api_key
+        except requests.exceptions.RequestException as e:
+            raise exceptions.APIConnectionError(f"Failed to retrieve API key: {e}")
+        except KeyError:
+            raise exceptions.InvalidResponseError("API key not found in response.")
 
     def ask(
         self,
@@ -85,7 +128,8 @@ class Cerebras(Provider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> Dict
+    ) -> Union[Dict, Generator]:
+        """Send a prompt to the model and get a response."""
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
@@ -93,62 +137,51 @@ class Cerebras(Provider):
                     conversation_prompt if conversationally else prompt
                 )
             else:
-                raise Exception(
-
-
-
-
-
-
-
-
-
-
-                "
-
-
-
-        response = self.
-
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        messages = [
+            {"content": self.system_prompt, "role": "system"},
+            {"content": conversation_prompt, "role": "user"},
+        ]
+
+        try:
+            if stream:
+                return self._handle_stream_response(messages)
+            return self._handle_normal_response(messages)
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"Error during request: {e}")
+
+    def _handle_stream_response(self, messages: List[Dict]) -> Generator:
+        """Handle streaming response from the model."""
+        try:
+            response = self.client.chat.completions.create(
+                messages=messages,
+                model=self.model,
+                stream=True
             )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            else:
-                break
-
-        self.last_response.update(dict(text=full_response))
-        self.conversation.update_chat_history(
-            prompt, self.get_message(self.last_response)
+
+            for choice in response.choices:
+                if hasattr(choice, 'delta') and hasattr(choice.delta, 'content') and choice.delta.content:
+                    yield dict(text=choice.delta.content)
+
+            # Update last response with the complete message
+            if hasattr(response.choices[0], 'message'):
+                self.last_response.update({"text": response.choices[0].message.content})
+
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"Error during streaming: {e}")
+
+    def _handle_normal_response(self, messages: List[Dict]) -> Dict:
+        """Handle normal (non-streaming) response from the model."""
+        try:
+            response = self.client.chat.completions.create(
+                messages=messages,
+                model=self.model
             )
-
-
-
-
-        if isinstance(chunk, dict):
-            full_response += chunk['text']
-        else:
-            full_response += chunk
-        return dict(text=full_response)
-
-        return for_stream() if stream else for_non_stream()
+            self.last_response.update({"text": response.choices[0].message.content})
+            return self.last_response
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"Error during response: {e}")
 
     def chat(
         self,
@@ -156,44 +189,31 @@ class Cerebras(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str:
-
-
-
-
-            yield self.get_message(response)
-
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
+    ) -> Union[str, Generator]:
+        """High-level method to chat with the model."""
+        return self.get_message(
+            self.ask(
+                prompt, stream, optimizer=optimizer, conversationally=conversationally
             )
-
-        return for_stream() if stream else for_non_stream()
+        )
 
     def get_message(self, response: dict) -> str:
-        """Retrieves message
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
+        """Retrieves message from response."""
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
 
-
+
+if __name__ == "__main__":
     from rich import print
 
-    #
-
+    # Example usage
+    cerebras = Cerebras(
+        cookie_path='cookie.json',
+        model='llama3.1-8b',
+        system_prompt="You are a helpful AI assistant."
+    )
 
-
-    response =
+    # Test with streaming
+    response = cerebras.chat("What is the meaning of life?", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
webscout/Provider/cleeai.py
CHANGED
@@ -207,6 +207,6 @@ class Cleeai(Provider):
 if __name__ == "__main__":
     from rich import print
     ai = Cleeai(timeout=5000)
-    response = ai.chat("
+    response = ai.chat("tell me about Abhay koul, HelpingAI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
webscout/Provider/felo_search.py
CHANGED
@@ -175,6 +175,6 @@ class Felo(Provider):
 if __name__ == '__main__':
     from rich import print
     ai = Felo()
-    response = ai.chat("
+    response = ai.chat("tell me about Abhay koul, HelpingAI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
webscout/Provider/gaurish.py
ADDED
@@ -0,0 +1,207 @@
+import requests
+import json
+import os
+from typing import Any, Dict, Optional, Generator, List, Union
+import uuid
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+
+
+class GaurishCerebras(Provider):
+    """
+    A class to interact with the Gaurish Cerebras API.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        system_prompt: str = "You are a helpful assistant.",
+    ):
+        """Initializes the Gaurish Cerebras API client."""
+        self.url = "https://proxy.gaurish.xyz/api/cerebras/v1/chat/completions"
+        self.headers = {
+            "Content-Type": "application/json",
+            "Accept": "text/event-stream",
+            "access-control-allow-credentials": "true",
+            "access-control-allow-headers": "*",
+            "access-control-allow-methods": "*",
+            "access-control-allow-origin": "*",
+            "cache-control": "public, max-age=0, must-revalidate",
+            "referrer-policy": "strict-origin-when-cross-origin",
+            "content-type": "text/event-stream; charset=utf-8",
+            "strict-transport-security": "max-age=3600; includeSubDomains",
+            "x-content-type-options": "nosniff",
+            "x-matched-path": "/api/cerebras/[...path]",
+            "x-ratelimit-limit-requests-day": "30000",
+            "x-ratelimit-limit-tokens-minute": "60000",
+            "x-ratelimit-remaining-requests-day": "29984",
+            "x-ratelimit-remaining-tokens-minute": "60000",
+            "x-ratelimit-reset-requests-day": "24092.23299384117",
+            "x-ratelimit-reset-tokens-minute": "32.232993841171265",
+            "x-request-id": "0vWYzSEvd9Ytk5Zvl8NGRfT_Ekjm0ErInwwxlihBPyqUBAjJpyXwCg==",
+            "x-vercel-id": "bom1::nsbfd-1729703907288-16e74bb1db50",
+            "accept": "application/json",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "dnt": "1",
+            "origin": "https://chat.gaurish.xyz",
+            "priority": "u=1, i",
+            "referer": "https://chat.gaurish.xyz/",
+            "sec-ch-ua": "\"Chromium\";v=\"130\", \"Microsoft Edge\";v=\"130\", \"Not?A_Brand\";v=\"99\"",
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": "\"Windows\"",
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-site",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 Edg/130.0.0.0",
+            "x-stainless-arch": "unknown",
+            "x-stainless-lang": "js",
+            "x-stainless-os": "Unknown",
+            "x-stainless-package-version": "4.67.3",
+            "x-stainless-retry-count": "0",
+            "x-stainless-runtime": "browser:chrome",
+            "x-stainless-runtime-version": "130.0.0",
+        }
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+        self.timeout = timeout
+        self.last_response = {}
+
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or system_prompt or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.system_prompt = system_prompt  # Store the system prompt
+
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict, Generator]:
+
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        payload = {
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt},
+            ],
+            "model": "llama3.1-70b",
+            "temperature": 0.75,
+            "stream": stream,
+        }
+
+        def for_stream():
+            try:
+                with self.session.post(self.url, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout) as response:
+                    response.raise_for_status()
+                    streaming_text = ""
+                    for line in response.iter_lines(decode_unicode=True):
+                        if line:
+                            line = line.strip()
+                            if line.startswith("data: "):
+                                line = line[6:]
+                                if line == "[DONE]":
+                                    break
+                                try:
+                                    data = json.loads(line)
+                                    if "choices" in data and data["choices"][0]["delta"].get("content"):
+                                        content = data["choices"][0]["delta"]["content"]
+                                        streaming_text += content
+                                        resp = dict(text=content)  # Yield only the new content
+                                        yield resp if raw else resp
+                                except json.JSONDecodeError:
+                                    # print(f"[Warning] Invalid JSON chunk received: {line}")
+                                    pass
+                    self.conversation.update_chat_history(prompt, streaming_text)
+                    self.last_response.update({"text": streaming_text})
+
+            except requests.exceptions.RequestException as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator]:
+
+        def for_stream():
+            for response in self.ask(
+                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt, stream=False, optimizer=optimizer, conversationally=conversationally
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+
+
+if __name__ == "__main__":
+    from rich import print
+    bot = GaurishCerebras()
+    try:
+        response = bot.chat("What is the capital of France?", stream=True)
+        for chunk in response:
+            print(chunk, end="", flush=True)
+    except Exception as e:
+        print(f"An error occurred: {e}")