webscout 5.9-py3-none-any.whl → 6.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/Agents/Onlinesearcher.py +22 -10
- webscout/Agents/functioncall.py +2 -2
- webscout/Bard.py +21 -21
- webscout/Local/__init__.py +6 -7
- webscout/Local/formats.py +404 -194
- webscout/Local/model.py +1074 -477
- webscout/Local/samplers.py +108 -144
- webscout/Local/thread.py +251 -410
- webscout/Local/ui.py +401 -0
- webscout/Local/utils.py +308 -131
- webscout/Provider/Amigo.py +5 -3
- webscout/Provider/ChatHub.py +209 -0
- webscout/Provider/Chatify.py +3 -3
- webscout/Provider/Cloudflare.py +3 -3
- webscout/Provider/DARKAI.py +1 -1
- webscout/Provider/Deepinfra.py +95 -389
- webscout/Provider/Deepseek.py +4 -6
- webscout/Provider/DiscordRocks.py +3 -3
- webscout/Provider/Free2GPT.py +3 -3
- webscout/Provider/NinjaChat.py +200 -0
- webscout/Provider/OLLAMA.py +4 -4
- webscout/Provider/RUBIKSAI.py +3 -3
- webscout/Provider/TTI/Nexra.py +3 -3
- webscout/Provider/TTI/__init__.py +2 -1
- webscout/Provider/TTI/aiforce.py +2 -2
- webscout/Provider/TTI/imgninza.py +136 -0
- webscout/Provider/Youchat.py +4 -5
- webscout/Provider/__init__.py +13 -6
- webscout/Provider/ai4chat.py +3 -2
- webscout/Provider/aimathgpt.py +193 -0
- webscout/Provider/bagoodex.py +145 -0
- webscout/Provider/bixin.py +3 -3
- webscout/Provider/cleeai.py +3 -3
- webscout/Provider/elmo.py +2 -5
- webscout/Provider/felo_search.py +1 -1
- webscout/Provider/gaurish.py +168 -0
- webscout/Provider/geminiprorealtime.py +160 -0
- webscout/Provider/julius.py +10 -40
- webscout/Provider/llamatutor.py +2 -2
- webscout/Provider/prefind.py +3 -3
- webscout/Provider/promptrefine.py +3 -3
- webscout/Provider/turboseek.py +1 -1
- webscout/Provider/twitterclone.py +25 -41
- webscout/Provider/upstage.py +3 -3
- webscout/Provider/x0gpt.py +6 -6
- webscout/exceptions.py +5 -1
- webscout/utils.py +3 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +154 -123
- {webscout-5.9.dist-info → webscout-6.1.dist-info}/METADATA +132 -157
- {webscout-5.9.dist-info → webscout-6.1.dist-info}/RECORD +55 -49
- {webscout-5.9.dist-info → webscout-6.1.dist-info}/WHEEL +1 -1
- webscout/Local/rawdog.py +0 -946
- webscout/Provider/Poe.py +0 -208
- {webscout-5.9.dist-info → webscout-6.1.dist-info}/LICENSE.md +0 -0
- {webscout-5.9.dist-info → webscout-6.1.dist-info}/entry_points.txt +0 -0
- {webscout-5.9.dist-info → webscout-6.1.dist-info}/top_level.txt +0 -0
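
All of the newly added providers (AIMathGPT, Bagoodex, GaurishCerebras, and the rest) implement webscout's existing Provider interface, so each is exercised the same way as the __main__ demo blocks at the bottom of the hunks below. A minimal sketch, assuming webscout 6.1 is installed, that the import path matches the file layout above, and that the upstream endpoint is still reachable:

    # Hypothetical usage of one of the new providers; mirrors the demo
    # block shipped at the bottom of webscout/Provider/aimathgpt.py.
    from webscout.Provider.aimathgpt import AIMathGPT

    bot = AIMathGPT()  # defaults per the diff: model="llama3", timeout=30
    for chunk in bot.chat("What is the capital of France?", stream=True):
        print(chunk, end="", flush=True)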
webscout/Provider/aimathgpt.py
ADDED
@@ -0,0 +1,193 @@
+import requests
+import json
+import os
+from typing import Any, Dict, Optional, Generator, List, Union
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+
+
+class AIMathGPT(Provider):
+    """
+    A class to interact with the AIMathGPT API.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "llama3",  # Default model
+        system_prompt: str = "You are a helpful AI assistant.",
+    ):
+        """
+        Initializes the AIMathGPT API with the given parameters.
+        """
+        self.url = "https://aimathgpt.forit.ai/api/ai"
+        self.headers = {
+            "authority": "aimathgpt.forit.ai",
+            "method": "POST",
+            "path": "/api/ai",
+            "scheme": "https",
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "content-type": "application/json",
+            "cookie": (
+                "NEXT_LOCALE=en; _ga=GA1.1.1515823701.1726936796; "
+                "_ga_1F3ZVN96B1=GS1.1.1726936795.1.1.1726936833.0.0.0"
+            ),
+            "dnt": "1",
+            "origin": "https://aimathgpt.forit.ai",
+            "priority": "u=1, i",
+            "referer": "https://aimathgpt.forit.ai/?ref=taaft&utm_source=taaft&utm_medium=referral",
+            "sec-ch-ua": (
+                "\"Microsoft Edge\";v=\"129\", \"Not=A?Brand\";v=\"8\", \"Chromium\";v=\"129\""
+            ),
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": "\"Windows\"",
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": (
+                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
+                "AppleWebKit/537.36 (KHTML, like Gecko) "
+                "Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0"
+            ),
+        }
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.system_prompt = system_prompt
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict, Generator]:
+        """Sends a chat completion request to the AIMathGPT API."""
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+
+        payload = {
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt},
+            ],
+            "model": self.model,
+        }
+
+
+        def for_stream():
+            try:
+                with requests.post(self.url, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout) as response:
+                    if response.status_code != 200:
+                        raise exceptions.FailedToGenerateResponseError(f"Request failed with status code {response.status_code}: {response.text}")
+
+                    streaming_text = ""
+                    for line in response.iter_lines(decode_unicode=True):
+                        if line:
+                            try:
+                                data = json.loads(line)
+                                if 'result' in data and 'response' in data['result']:
+                                    content = data['result']['response']
+                                    streaming_text += content
+                                    resp = dict(text=content)  # Yield only the new content
+                                    yield resp if raw else resp
+                                else:
+                                    pass
+                            except json.JSONDecodeError:
+                                pass
+                    self.conversation.update_chat_history(prompt, streaming_text)
+                    self.last_response.update({"text": streaming_text})
+            except requests.exceptions.RequestException as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator]:
+
+        def for_stream():
+            for response in self.ask(
+                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt, stream=False, optimizer=optimizer, conversationally=conversationally
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response"""
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+
+if __name__ == "__main__":
+    from rich import print
+    bot = AIMathGPT()
+    try:
+        response = bot.chat("What is the capital of France?", stream=True)
+        for chunk in response:
+            print(chunk, end="", flush=True)
+    except Exception as e:
+        print(f"An error occurred: {e}")
webscout/Provider/bagoodex.py
ADDED
@@ -0,0 +1,145 @@
+import requests
+import json
+import os
+from typing import Any, Dict, Optional, Generator, Union
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+
+class Bagoodex(Provider):
+    """
+    A class to interact with the Bagoodex API.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,  # Set a reasonable default
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ):
+        """Initializes the Bagoodex API client."""
+        self.url = "https://bagoodex.io/front-api/chat"
+        self.headers = {"Content-Type": "application/json"}
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)  # Use provided proxies
+        self.timeout = timeout
+        self.last_response = {}
+
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any] | Generator:
+        """Sends a chat completion request to the Bagoodex API."""
+
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+
+        payload = {
+            "prompt": "You are AI",  # This seems to be required by the API
+            "messages": [{"content": "Hi, this is chatgpt, let's talk", "role": "assistant"}],
+            "input": conversation_prompt,
+        }
+
+        def for_stream():
+            try:
+                response = self.session.post(self.url, json=payload, headers=self.headers, timeout=self.timeout)
+                response.raise_for_status()
+                text = response.text
+                self.last_response.update({"text": text})
+
+                if stream:
+                    for char in text:
+                        yield {"text": char}  # Yielding one character at a time for streaming
+                else:
+                    yield {"text": text}
+
+            except (requests.exceptions.RequestException, json.JSONDecodeError) as e:  # Catch JSON errors too
+                raise exceptions.FailedToGenerateResponseError(f"Error during request: {e}")
+            self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
+
+        def for_non_stream():
+            for _ in for_stream(): pass
+            return self.last_response
+
+
+        return for_stream() if stream else for_non_stream()
+
+
+
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator]:
+
+
+        def for_stream():
+            for response in self.ask(
+                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt, stream=False, optimizer=optimizer, conversationally=conversationally
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response.get("text", "")
+
+
+if __name__ == "__main__":
+    from rich import print
+    ai = Bagoodex()
+    response = ai.chat("write a poem about AI", stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
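One caveat visible in the hunk above: Bagoodex's streaming is simulated client-side. ask() performs a single blocking POST, then, when stream=True, yields the already-complete response text one character at a time.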
webscout/Provider/bixin.py
CHANGED
@@ -174,7 +174,7 @@ class Bixin(Provider):
                 if text.startswith(previous_text):
                     new_text = text[len(previous_text):]
                     full_response += new_text
-                    yield new_text if raw else dict(text=
+                    yield new_text if raw else dict(text=new_text)
                     previous_text = text
                 else:
                     full_response += text

@@ -258,7 +258,7 @@ class Bixin(Provider):
 if __name__ == "__main__":
     from rich import print
 
-    ai = Bixin()
-    response = ai.chat(
+    ai = Bixin(timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
webscout/Provider/cleeai.py
CHANGED
@@ -142,7 +142,7 @@ class Cleeai(Provider):
             full_response = ''
             for chunk in response.iter_content(chunk_size=self.stream_chunk_size):
                 full_response += chunk.decode('utf-8')
-                yield chunk.decode('utf-8') if raw else dict(text=
+                yield chunk.decode('utf-8') if raw else dict(text=chunk.decode('utf-8'))
 
             self.last_response.update(dict(text=full_response))
             self.conversation.update_chat_history(

@@ -206,7 +206,7 @@ class Cleeai(Provider):
 
 if __name__ == "__main__":
     from rich import print
-    ai = Cleeai()
-    response = ai.chat("
+    ai = Cleeai(timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
webscout/Provider/elmo.py
CHANGED
@@ -1,7 +1,4 @@
 import requests
-import json
-import textwrap
-
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts

@@ -169,7 +166,7 @@ class Elmo(Provider):
                 )
                 full_response += formatted_output
                 self.last_response.update(dict(text=full_response))
-                yield formatted_output if raw else dict(text=
+                yield formatted_output if raw else dict(text=formatted_output)
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
             )

@@ -232,6 +229,6 @@ class Elmo(Provider):
 if __name__ == "__main__":
     from rich import print
     ai = Elmo()
-    response = ai.chat("
+    response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
webscout/Provider/gaurish.py
ADDED
@@ -0,0 +1,168 @@
+import requests
+import json
+import os
+from typing import Any, Dict, Optional, Generator, List, Union
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+
+
+class GaurishCerebras(Provider):
+    """
+    A class to interact with the Gaurish Cerebras API.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        system_prompt: str = "You are a helpful assistant.",
+    ):
+        """Initializes the Gaurish Cerebras API client."""
+        self.url = "https://proxy.gaurish.xyz/api/cerebras/v1/chat/completions"
+        self.headers = {
+            "Authorization": "Bearer 123",
+            "Content-Type": "application/json",
+            "Accept": "text/event-stream",
+        }
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+        self.timeout = timeout
+        self.last_response = {}
+
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or system_prompt or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.system_prompt = system_prompt  # Store the system prompt
+
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict, Generator]:
+
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        payload = {
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt},
+            ],
+            "model": "llama3.1-70b",
+            "temperature": 0.75,
+            "stream": stream,
+        }
+
+        def for_stream():
+            try:
+                with self.session.post(self.url, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout) as response:
+                    response.raise_for_status()
+                    streaming_text = ""
+                    for line in response.iter_lines(decode_unicode=True):
+                        if line:
+                            line = line.strip()
+                            if line.startswith("data: "):
+                                line = line[6:]
+                                if line == "[DONE]":
+                                    break
+                                try:
+                                    data = json.loads(line)
+                                    if "choices" in data and data["choices"][0]["delta"].get("content"):
+                                        content = data["choices"][0]["delta"]["content"]
+                                        streaming_text += content
+                                        resp = dict(text=content)  # Yield only the new content
+                                        yield resp if raw else resp
+                                except json.JSONDecodeError:
+                                    # print(f"[Warning] Invalid JSON chunk received: {line}")
+                                    pass
+                self.conversation.update_chat_history(prompt, streaming_text)
+                self.last_response.update({"text": streaming_text})
+
+            except requests.exceptions.RequestException as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator]:
+
+        def for_stream():
+            for response in self.ask(
+                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt, stream=False, optimizer=optimizer, conversationally=conversationally
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+
+
+if __name__ == "__main__":
+    from rich import print
+    bot = GaurishCerebras()
+    try:
+        response = bot.chat("What is the capital of France?", stream=True)
+        for chunk in response:
+            print(chunk, end="", flush=True)
+    except Exception as e:
+        print(f"An error occurred: {e}")