webscout-6.0-py3-none-any.whl → webscout-6.2b0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +77 -259
- webscout/Agents/Onlinesearcher.py +22 -10
- webscout/Agents/functioncall.py +2 -2
- webscout/Bard.py +21 -21
- webscout/Extra/autollama.py +37 -20
- webscout/Local/__init__.py +6 -7
- webscout/Local/formats.py +404 -194
- webscout/Local/model.py +1074 -477
- webscout/Local/samplers.py +108 -144
- webscout/Local/thread.py +251 -410
- webscout/Local/ui.py +401 -0
- webscout/Local/utils.py +338 -136
- webscout/Provider/Amigo.py +51 -38
- webscout/Provider/Deepseek.py +7 -6
- webscout/Provider/EDITEE.py +2 -2
- webscout/Provider/GPTWeb.py +1 -1
- webscout/Provider/NinjaChat.py +200 -0
- webscout/Provider/OLLAMA.py +1 -1
- webscout/Provider/Perplexity.py +1 -1
- webscout/Provider/Reka.py +12 -5
- webscout/Provider/TTI/AIuncensored.py +103 -0
- webscout/Provider/TTI/Nexra.py +3 -3
- webscout/Provider/TTI/__init__.py +3 -2
- webscout/Provider/TTI/aiforce.py +2 -2
- webscout/Provider/TTI/imgninza.py +136 -0
- webscout/Provider/TeachAnything.py +0 -3
- webscout/Provider/Youchat.py +1 -1
- webscout/Provider/__init__.py +12 -11
- webscout/Provider/{ChatHub.py → aimathgpt.py} +72 -88
- webscout/Provider/cerebras.py +125 -118
- webscout/Provider/cleeai.py +1 -1
- webscout/Provider/felo_search.py +1 -1
- webscout/Provider/gaurish.py +207 -0
- webscout/Provider/geminiprorealtime.py +160 -0
- webscout/Provider/genspark.py +1 -1
- webscout/Provider/julius.py +8 -3
- webscout/Provider/learnfastai.py +1 -1
- webscout/Provider/promptrefine.py +3 -1
- webscout/Provider/turboseek.py +3 -8
- webscout/Provider/tutorai.py +1 -1
- webscout/__init__.py +2 -43
- webscout/exceptions.py +5 -1
- webscout/tempid.py +4 -73
- webscout/utils.py +3 -0
- webscout/version.py +1 -1
- webscout/webai.py +1 -1
- webscout/webscout_search.py +154 -123
- {webscout-6.0.dist-info → webscout-6.2b0.dist-info}/METADATA +156 -236
- {webscout-6.0.dist-info → webscout-6.2b0.dist-info}/RECORD +53 -54
- webscout/Local/rawdog.py +0 -946
- webscout/Provider/BasedGPT.py +0 -214
- webscout/Provider/TTI/amigo.py +0 -148
- webscout/Provider/aigames.py +0 -213
- webscout/Provider/bixin.py +0 -264
- webscout/Provider/xdash.py +0 -182
- webscout/websx_search.py +0 -19
- {webscout-6.0.dist-info → webscout-6.2b0.dist-info}/LICENSE.md +0 -0
- {webscout-6.0.dist-info → webscout-6.2b0.dist-info}/WHEEL +0 -0
- {webscout-6.0.dist-info → webscout-6.2b0.dist-info}/entry_points.txt +0 -0
- {webscout-6.0.dist-info → webscout-6.2b0.dist-info}/top_level.txt +0 -0
webscout/Provider/__init__.py
CHANGED
@@ -16,7 +16,6 @@ from .Phind import PhindSearch
 from .Phind import Phindv2
 from .ai4chat import *
 from .Gemini import GEMINI
-from .BasedGPT import BasedGPT
 from .Deepseek import DeepSeek
 from .Deepinfra import DeepInfra
 from .Farfalle import *
@@ -31,7 +30,6 @@ from .RUBIKSAI import *
 from .meta import *
 from .DiscordRocks import *
 from .felo_search import *
-from .xdash import *
 from .julius import *
 from .Youchat import *
 from .yep import *
@@ -51,17 +49,20 @@ from .genspark import *
 from .upstage import *
 from .Bing import *
 from .GPTWeb import *
-from .aigames import *
+# from .UNFINISHED.aigames import *
 from .llamatutor import *
 from .promptrefine import *
 from .twitterclone import *
 from .tutorai import *
-from .bixin import *
 from .ChatGPTES import *
 from .Amigo import *
 from .prefind import *
 from .bagoodex import *
-from .ChatHub import *
+# from .UNFINISHED.ChatHub import *
+from .aimathgpt import *
+from .gaurish import *
+from .geminiprorealtime import *
+from .NinjaChat import *
 __all__ = [
     'Farfalle',
     'LLAMA',
@@ -79,7 +80,6 @@ __all__ = [
     'PhindSearch',
     'Felo',
     'GEMINI',
-    'BasedGPT',
     'DeepSeek',
     'DeepInfra',
     'AI4Chat',
@@ -94,7 +94,6 @@ __all__ = [
     'Meta',
     'DiscordRocks',
     'PiAI',
-    'XDASH',
     'Julius',
     'YouChat',
     'YEPCHAT',
@@ -115,18 +114,20 @@ __all__ = [
     'Free2GPT',
     'Bing',
     'GPTWeb',
-    'AIGameIO',
+    # 'AIGameIO',
     'LlamaTutor',
     'PromptRefine',
     'AIUncensored',
     'TutorAI',
-    'Bixin',
     'ChatGPTES',
     'AmigoChat',
     'PrefindAI',
     'Bagoodex',
-    'ChatHub',
-
+    # 'ChatHub',
+    'AIMathGPT',
+    'GaurishCerebras',
+    'GeminiPro',
+    'NinjaChat',
 
 
 ]
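Net effect of the export changes above: BasedGPT, XDASH, Bixin, ChatHub, and AIGameIO leave the public surface (the latter two are commented out in favor of UNFINISHED paths), while AIMathGPT, GaurishCerebras, GeminiPro, and NinjaChat join it. A minimal sketch of the resulting import surface, assuming only the names visible in the diff:

# Sketch based on the __all__ changes above (webscout 6.2b0).
from webscout.Provider import AIMathGPT, GaurishCerebras, GeminiPro, NinjaChat

# Removed exports would now fail:
# from webscout.Provider import BasedGPT, XDASH, Bixin, ChatHub, AIGameIO  # ImportError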
webscout/Provider/{ChatHub.py → aimathgpt.py}
RENAMED

@@ -9,29 +9,16 @@ from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
 
-class ChatHub(Provider):
+
+class AIMathGPT(Provider):
     """
-    A class to interact with the
+    A class to interact with the AIMathGPT API.
     """
 
-    AVAILABLE_MODELS = [
-        'meta/llama3.1-8b',
-        'mistral/mixtral-8x7b',
-        'google/gemma-2',
-        'perplexity/sonar-online',
-    ]
-    model_aliases = { # Aliases for shorter model names
-        "llama3.1-8b": 'meta/llama3.1-8b',
-        "mixtral-8x7b": 'mistral/mixtral-8x7b',
-        "gemma-2": 'google/gemma-2',
-        "sonar-online": 'perplexity/sonar-online',
-    }
-
-
     def __init__(
         self,
         is_conversation: bool = True,
-        max_tokens: int = 2049,
+        max_tokens: int = 2049,
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -39,25 +26,51 @@ class ChatHub(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "
+        model: str = "llama3", # Default model
+        system_prompt: str = "You are a helpful AI assistant.",
     ):
-        """
-
-
+        """
+        Initializes the AIMathGPT API with the given parameters.
+        """
+        self.url = "https://aimathgpt.forit.ai/api/ai"
         self.headers = {
-
-
-
-
-
-
+            "authority": "aimathgpt.forit.ai",
+            "method": "POST",
+            "path": "/api/ai",
+            "scheme": "https",
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "content-type": "application/json",
+            "cookie": (
+                "NEXT_LOCALE=en; _ga=GA1.1.1515823701.1726936796; "
+                "_ga_1F3ZVN96B1=GS1.1.1726936795.1.1.1726936833.0.0.0"
+            ),
+            "dnt": "1",
+            "origin": "https://aimathgpt.forit.ai",
+            "priority": "u=1, i",
+            "referer": "https://aimathgpt.forit.ai/?ref=taaft&utm_source=taaft&utm_medium=referral",
+            "sec-ch-ua": (
+                "\"Microsoft Edge\";v=\"129\", \"Not=A?Brand\";v=\"8\", \"Chromium\";v=\"129\""
+            ),
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": "\"Windows\"",
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": (
+                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
+                "AppleWebKit/537.36 (KHTML, like Gecko) "
+                "Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0"
+            ),
         }
         self.session = requests.Session()
-        self.session.headers.update(self.headers)
-        self.session.proxies.update(proxies)
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
         self.timeout = timeout
         self.last_response = {}
-
+        self.model = model
+        self.system_prompt = system_prompt
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.__available_optimizers = (
@@ -72,29 +85,11 @@ class ChatHub(Provider):
             if act
             else intro or Conversation.intro
         )
-
         self.conversation = Conversation(
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
 
-        #Resolve the model
-        self.model = self.get_model(model)
-
-
-    def get_model(self, model: str) -> str:
-        """
-        Resolves the model name using aliases or defaults.
-        """
-
-        if model in self.AVAILABLE_MODELS:
-            return model
-        elif model in self.model_aliases:
-            return self.model_aliases[model]
-        else:
-            print(f"Model '{model}' not found. Using default model '{self.default_model}'.")
-            return self.default_model # Use class-level default
-
     def ask(
         self,
         prompt: str,
@@ -102,9 +97,10 @@ class ChatHub(Provider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> Union[Dict
-
+    ) -> Union[Dict, Generator]:
+        """Sends a chat completion request to the AIMathGPT API."""
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+
         if optimizer:
             if optimizer in self.__available_optimizers:
                 conversation_prompt = getattr(Optimizers, optimizer)(
@@ -114,44 +110,39 @@ class ChatHub(Provider):
                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
 
 
-
+        payload = {
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt},
+            ],
             "model": self.model,
-            "messages": [{"role": "user", "content": conversation_prompt}],
-            "tools": []
         }
 
-        # Set the Referer header dynamically based on the resolved model
-        self.headers['Referer'] = f"{self.url}/chat/{self.model}"
-
 
        def for_stream():
            try:
-                with requests.post(self.
-                    response.
-
+                with requests.post(self.url, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout) as response:
+                    if response.status_code != 200:
+                        raise exceptions.FailedToGenerateResponseError(f"Request failed with status code {response.status_code}: {response.text}")
 
+                    streaming_text = ""
                    for line in response.iter_lines(decode_unicode=True):
                        if line:
-
-
-
-
-
-
-
-
-
-
-
-                                    yield resp if raw else resp
-
-                            except json.JSONDecodeError:
-                                continue
+                            try:
+                                data = json.loads(line)
+                                if 'result' in data and 'response' in data['result']:
+                                    content = data['result']['response']
+                                    streaming_text += content
+                                    resp = dict(text=content) # Yield only the new content
+                                    yield resp if raw else resp
+                                else:
+                                    pass
+                            except json.JSONDecodeError:
+                                pass
                self.conversation.update_chat_history(prompt, streaming_text)
                self.last_response.update({"text": streaming_text})
            except requests.exceptions.RequestException as e:
-                raise exceptions.FailedToGenerateResponseError(f"Request
-
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
 
        def for_non_stream():
            for _ in for_stream():
@@ -162,7 +153,6 @@ class ChatHub(Provider):
 
 
 
-
    def chat(
        self,
        prompt: str,
@@ -170,7 +160,6 @@ class ChatHub(Provider):
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[str, Generator]:
-        """Generate response `str`"""
 
        def for_stream():
            for response in self.ask(
@@ -181,29 +170,24 @@ class ChatHub(Provider):
        def for_non_stream():
            return self.get_message(
                self.ask(
-                    prompt,
-                    stream=False, # Pass stream=False
-                    optimizer=optimizer,
-                    conversationally=conversationally,
+                    prompt, stream=False, optimizer=optimizer, conversationally=conversationally
                )
            )
 
        return for_stream() if stream else for_non_stream()
 
-
-
    def get_message(self, response: dict) -> str:
        """Retrieves message only from response"""
        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response
+        return response["text"]
 
 
if __name__ == "__main__":
    from rich import print
-    bot =
+    bot = AIMathGPT()
    try:
-        response = bot.chat("
+        response = bot.chat("What is the capital of France?", stream=True)
        for chunk in response:
            print(chunk, end="", flush=True)
    except Exception as e:
-        print(f"An error occurred: {e}")
+        print(f"An error occurred: {e}")
webscout/Provider/cerebras.py
CHANGED
@@ -1,22 +1,27 @@
-import
+import re
 import requests
-
-
+import json
+import os
+from typing import Any, Dict, Optional, Generator, List, Union
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
-from
+from fake_useragent import UserAgent
+from cerebras.cloud.sdk import Cerebras
+
 
 class Cerebras(Provider):
     """
-    A class to interact with the Cerebras
+    A class to interact with the Cerebras API using a cookie for authentication.
     """
 
-    AVAILABLE_MODELS = ["llama3.1-8b", "llama3.1-70b"]
-
     def __init__(
         self,
-        api_key: str,
         is_conversation: bool = True,
-        max_tokens: int =
+        max_tokens: int = 2049,
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -24,47 +29,39 @@ class Cerebras(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-
-
+        cookie_path: str = "cookie.json", # Path to cookie file
+        model: str = "llama3.1-8b", # Default model
+        system_prompt: str = "You are a helpful assistant.",
     ):
         """
-        Initializes the Cerebras
+        Initializes the Cerebras client with the provided cookie.
+
+        Args:
+            cookie_path (str): Path to the cookie JSON file.
+            model (str, optional): Model name to use. Defaults to 'llama3.1-8b'.
+            system_prompt (str, optional): The system prompt to send with every request. Defaults to "You are a helpful assistant.".
+
+        Raises:
+            FileNotFoundError: If the cookie file is not found.
+            json.JSONDecodeError: If the cookie file has an invalid JSON format.
+            requests.exceptions.RequestException: If there's an error retrieving the API key.
         """
-
-
+        self.api_key = self.get_demo_api_key(cookie_path)
+        self.client = Cerebras(api_key=self.api_key)
+        self.model = model
+        self.system_prompt = system_prompt
 
-        self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
-        self.api_endpoint = "https://api.cerebras.ai/v1/chat/completions"
         self.timeout = timeout
         self.last_response = {}
-        self.model = model
-        self.system_prompt = system_prompt
-        self.headers = {
-            "accept": "application/json",
-            "accept-encoding": "gzip, deflate, br, zstd",
-            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
-            "authorization": f"Bearer {api_key}",
-            "content-type": "application/json",
-            "dnt": "1",
-            "origin": "https://inference.cerebras.ai",
-            "referer": "https://inference.cerebras.ai/",
-            "sec-ch-ua": '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Windows"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-site",
-            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0",
-        }
 
         self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
-
+
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
@@ -76,7 +73,63 @@ class Cerebras(Provider):
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
-
+
+
+    @staticmethod
+    def extract_query(text: str) -> str:
+        """
+        Extracts the first code block from the given text.
+        """
+        pattern = r"```(.*?)```"
+        matches = re.findall(pattern, text, re.DOTALL)
+        return matches[0].strip() if matches else text.strip()
+
+    @staticmethod
+    def refiner(text: str) -> str:
+        """Refines the input text by removing surrounding quotes."""
+        return text.strip('"')
+
+    def get_demo_api_key(self, cookie_path: str) -> str:
+        """Retrieves the demo API key using the provided cookie."""
+        try:
+            with open(cookie_path, "r") as file:
+                cookies = {item["name"]: item["value"] for item in json.load(file)}
+        except FileNotFoundError:
+            raise FileNotFoundError(f"Cookie file not found at path: {cookie_path}")
+        except json.JSONDecodeError:
+            raise json.JSONDecodeError("Invalid JSON format in the cookie file.")
+
+        headers = {
+            "Accept": "*/*",
+            "Accept-Language": "en-US,en;q=0.9",
+            "Content-Type": "application/json",
+            "Origin": "https://inference.cerebras.ai",
+            "Referer": "https://inference.cerebras.ai/",
+            "user-agent": UserAgent().random,
+        }
+
+        json_data = {
+            "operationName": "GetMyDemoApiKey",
+            "variables": {},
+            "query": "query GetMyDemoApiKey {\n GetMyDemoApiKey\n}",
+        }
+
+        try:
+            response = requests.post(
+                "https://inference.cerebras.ai/api/graphql",
+                cookies=cookies,
+                headers=headers,
+                json=json_data,
+                timeout=self.timeout,
+            )
+            response.raise_for_status()
+            api_key = response.json()["data"]["GetMyDemoApiKey"]
+            return api_key
+        except requests.exceptions.RequestException as e:
+            raise exceptions.APIConnectionError(f"Failed to retrieve API key: {e}")
+        except KeyError:
+            raise exceptions.InvalidResponseError("API key not found in response.")
+
 
    def ask(
        self,
@@ -85,7 +138,8 @@ class Cerebras(Provider):
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
-    ) -> Dict
+    ) -> Union[Dict, Generator]:
+
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
@@ -93,60 +147,35 @@ class Cerebras(Provider):
                    conversation_prompt if conversationally else prompt
                )
            else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
 
-
-            "
-
-
-            ],
-            "model": self.model,
-            "stream": True,
-            "temperature": 0.2,
-            "top_p": 1,
-            "max_tokens": self.max_tokens_to_sample
-        }
+        messages = [
+            {"content": self.system_prompt, "role": "system"},
+            {"content": conversation_prompt, "role": "user"},
+        ]
 
        def for_stream():
-
-
-
-
-            if not response.ok:
-                raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason})"
+            try:
+                response = self.client.chat.completions.create(
+                    model=self.model, messages=messages, stream=True
                )
+                for choice in response.choices:
+                    if choice.delta.content:
+                        yield dict(text=choice.delta.content)
+                self.last_response.update({"text": response.choices[0].message.content})
 
-
-
-            if line:
-                line_data = line.decode('utf-8').strip()
-                if line_data.startswith("data: "):
-                    json_str = line_data[6:]
-                    if json_str != "[DONE]":
-                        chunk = json.loads(json_str)
-                        if 'choices' in chunk and 'delta' in chunk['choices'][0]:
-                            content = chunk['choices'][0]['delta'].get('content', '')
-                            full_response += content
-                            yield content if raw else dict(text=content)
-                        else:
-                            break
-
-            self.last_response.update(dict(text=full_response))
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Error during stream: {e}")
 
        def for_non_stream():
-
-
-
-
-
-
-
+            try:
+                response = self.client.chat.completions.create(
+                    model=self.model, messages=messages
+                )
+                self.last_response.update({"text": response.choices[0].message.content})
+                return self.last_response
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Error during non-stream: {e}")
 
        return for_stream() if stream else for_non_stream()
 
@@ -156,44 +185,22 @@ class Cerebras(Provider):
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
-    ) -> str:
-
-
-                prompt,
-            ):
-                yield self.get_message(response)
-
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
+    ) -> Union[str, Generator]:
+        return self.get_message(
+            self.ask(
+                prompt, stream, optimizer=optimizer, conversationally=conversationally
            )
-
-        return for_stream() if stream else for_non_stream()
+        )
 
    def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
+        """Retrieves message only from response"""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]
 
-
+
+if __name__ == "__main__":
    from rich import print
-
-
-    api_key = "YOUR_API_KEY_HERE"
-
-    ai = Cerebras(api_key=api_key)
-    response = ai.chat(input(">>> "), stream=True)
+    cerebras = Cerebras(cookie_path='cookie.json', model='llama3.1-8b', system_prompt="You are a helpful AI assistant.")
+    response = cerebras.chat("What is the meaning of life?", sys_prompt='', stream=True)
    for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)
webscout/Provider/cleeai.py
CHANGED
@@ -207,6 +207,6 @@ class Cleeai(Provider):
 if __name__ == "__main__":
     from rich import print
     ai = Cleeai(timeout=5000)
-    response = ai.chat("
+    response = ai.chat("tell me about Abhay koul, HelpingAI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
|
webscout/Provider/felo_search.py
CHANGED
@@ -175,6 +175,6 @@ class Felo(Provider):
 if __name__ == '__main__':
     from rich import print
     ai = Felo()
-    response = ai.chat("
+    response = ai.chat("tell me about Abhay koul, HelpingAI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
|