webscout 6.3-py3-none-any.whl → 6.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +191 -176
- webscout/AIbase.py +0 -197
- webscout/AIutel.py +488 -1130
- webscout/Bing_search.py +250 -153
- webscout/DWEBS.py +151 -19
- webscout/Extra/__init__.py +2 -1
- webscout/Extra/autocoder/__init__.py +9 -0
- webscout/Extra/autocoder/autocoder_utiles.py +121 -0
- webscout/Extra/autocoder/rawdog.py +681 -0
- webscout/Extra/autollama.py +246 -195
- webscout/Extra/gguf.py +441 -416
- webscout/LLM.py +206 -43
- webscout/Litlogger/__init__.py +681 -0
- webscout/Provider/DARKAI.py +1 -1
- webscout/Provider/EDITEE.py +1 -1
- webscout/Provider/NinjaChat.py +1 -1
- webscout/Provider/PI.py +221 -207
- webscout/Provider/Perplexity.py +598 -598
- webscout/Provider/RoboCoders.py +206 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -0
- webscout/Provider/TTI/AiForce/async_aiforce.py +257 -0
- webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -0
- webscout/Provider/TTI/Nexra/__init__.py +22 -0
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -0
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -0
- webscout/Provider/TTI/__init__.py +2 -4
- webscout/Provider/TTI/artbit/__init__.py +22 -0
- webscout/Provider/TTI/artbit/async_artbit.py +184 -0
- webscout/Provider/TTI/artbit/sync_artbit.py +176 -0
- webscout/Provider/TTI/blackbox/__init__.py +4 -0
- webscout/Provider/TTI/blackbox/async_blackbox.py +212 -0
- webscout/Provider/TTI/{blackboximage.py → blackbox/sync_blackbox.py} +199 -153
- webscout/Provider/TTI/deepinfra/__init__.py +4 -0
- webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -0
- webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -0
- webscout/Provider/TTI/huggingface/__init__.py +22 -0
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
- webscout/Provider/TTI/imgninza/__init__.py +4 -0
- webscout/Provider/TTI/imgninza/async_ninza.py +214 -0
- webscout/Provider/TTI/{imgninza.py → imgninza/sync_ninza.py} +209 -136
- webscout/Provider/TTI/talkai/__init__.py +4 -0
- webscout/Provider/TTI/talkai/async_talkai.py +229 -0
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
- webscout/Provider/__init__.py +146 -139
- webscout/Provider/askmyai.py +2 -2
- webscout/Provider/cerebras.py +227 -219
- webscout/Provider/llama3mitril.py +0 -1
- webscout/Provider/mhystical.py +176 -0
- webscout/Provider/perplexitylabs.py +265 -0
- webscout/Provider/twitterclone.py +251 -245
- webscout/Provider/typegpt.py +359 -0
- webscout/__init__.py +28 -23
- webscout/__main__.py +5 -5
- webscout/cli.py +252 -280
- webscout/conversation.py +227 -0
- webscout/exceptions.py +161 -29
- webscout/litagent/__init__.py +172 -0
- webscout/litprinter/__init__.py +831 -0
- webscout/optimizers.py +270 -0
- webscout/prompt_manager.py +279 -0
- webscout/swiftcli/__init__.py +810 -0
- webscout/transcriber.py +479 -551
- webscout/update_checker.py +125 -0
- webscout/version.py +1 -1
- {webscout-6.3.dist-info → webscout-6.4.dist-info}/METADATA +26 -45
- {webscout-6.3.dist-info → webscout-6.4.dist-info}/RECORD +75 -45
- webscout/Provider/TTI/AIuncensoredimage.py +0 -103
- webscout/Provider/TTI/Nexra.py +0 -120
- webscout/Provider/TTI/PollinationsAI.py +0 -138
- webscout/Provider/TTI/WebSimAI.py +0 -142
- webscout/Provider/TTI/aiforce.py +0 -160
- webscout/Provider/TTI/artbit.py +0 -141
- webscout/Provider/TTI/deepinfra.py +0 -148
- webscout/Provider/TTI/huggingface.py +0 -155
- webscout/Provider/TTI/talkai.py +0 -116
- webscout/models.py +0 -23
- /webscout/{g4f.py → gpt4free.py} +0 -0
- {webscout-6.3.dist-info → webscout-6.4.dist-info}/LICENSE.md +0 -0
- {webscout-6.3.dist-info → webscout-6.4.dist-info}/WHEEL +0 -0
- {webscout-6.3.dist-info → webscout-6.4.dist-info}/entry_points.txt +0 -0
- {webscout-6.3.dist-info → webscout-6.4.dist-info}/top_level.txt +0 -0
webscout/Provider/cerebras.py
CHANGED
@@ -1,219 +1,227 @@
-import re
-import requests
-import json
-import os
-from typing import Any, Dict, Optional, Generator, List, Union
-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
-from webscout.AIbase import Provider
-from webscout import exceptions
-from fake_useragent import UserAgent
-[... remaining 210 removed lines truncated in the rendered diff view ...]
+import re
+import requests
+import json
+import os
+from typing import Any, Dict, Optional, Generator, List, Union
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+from fake_useragent import UserAgent
+
+class Cerebras(Provider):
+    """
+    A class to interact with the Cerebras API using a cookie for authentication.
+    """
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        cookie_path: str = "cookie.json",
+        model: str = "llama3.1-8b",
+        system_prompt: str = "You are a helpful assistant.",
+    ):
+        # Initialize basic settings first
+        self.timeout = timeout
+        self.model = model
+        self.system_prompt = system_prompt
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.last_response = {}
+
+        # Get API key first
+        try:
+            self.api_key = self.get_demo_api_key(cookie_path)
+        except Exception as e:
+            raise exceptions.APIConnectionError(f"Failed to initialize Cerebras client: {e}")
+
+        # Initialize optimizers
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+
+        # Initialize conversation settings
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else None
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    @staticmethod
+    def extract_query(text: str) -> str:
+        """Extracts the first code block from the given text."""
+        pattern = r"```(.*?)```"
+        matches = re.findall(pattern, text, re.DOTALL)
+        return matches[0].strip() if matches else text.strip()
+
+    @staticmethod
+    def refiner(text: str) -> str:
+        """Refines the input text by removing surrounding quotes."""
+        return text.strip('"')
+
+    def get_demo_api_key(self, cookie_path: str) -> str:
+        """Retrieves the demo API key using the provided cookie."""
+        try:
+            with open(cookie_path, "r") as file:
+                cookies = {item["name"]: item["value"] for item in json.load(file)}
+        except FileNotFoundError:
+            raise FileNotFoundError(f"Cookie file not found at path: {cookie_path}")
+        except json.JSONDecodeError:
+            raise json.JSONDecodeError("Invalid JSON format in the cookie file.", "", 0)
+
+        headers = {
+            "Accept": "*/*",
+            "Accept-Language": "en-US,en;q=0.9",
+            "Content-Type": "application/json",
+            "Origin": "https://inference.cerebras.ai",
+            "Referer": "https://inference.cerebras.ai/",
+            "user-agent": UserAgent().random,
+        }
+
+        json_data = {
+            "operationName": "GetMyDemoApiKey",
+            "variables": {},
+            "query": "query GetMyDemoApiKey {\n GetMyDemoApiKey\n}",
+        }
+
+        try:
+            response = requests.post(
+                "https://inference.cerebras.ai/api/graphql",
+                cookies=cookies,
+                headers=headers,
+                json=json_data,
+                timeout=self.timeout,
+            )
+            response.raise_for_status()
+            api_key = response.json()["data"]["GetMyDemoApiKey"]
+            return api_key
+        except requests.exceptions.RequestException as e:
+            raise exceptions.APIConnectionError(f"Failed to retrieve API key: {e}")
+        except KeyError:
+            raise exceptions.InvalidResponseError("API key not found in response.")
+
+    def _make_request(self, messages: List[Dict], stream: bool = False) -> Union[Dict, Generator]:
+        """Make a request to the Cerebras API."""
+        headers = {
+            "Authorization": f"Bearer {self.api_key}",
+            "Content-Type": "application/json",
+            "User-Agent": UserAgent().random
+        }
+
+        payload = {
+            "model": self.model,
+            "messages": messages,
+            "stream": stream
+        }
+
+        try:
+            response = requests.post(
+                "https://api.cerebras.ai/v1/chat/completions",
+                headers=headers,
+                json=payload,
+                stream=stream,
+                timeout=self.timeout
+            )
+            response.raise_for_status()
+
+            if stream:
+                def generate_stream():
+                    for line in response.iter_lines():
+                        if line:
+                            line = line.decode('utf-8')
+                            if line.startswith('data:'):
+                                try:
+                                    data = json.loads(line[6:])
+                                    if data.get('choices') and data['choices'][0].get('delta', {}).get('content'):
+                                        content = data['choices'][0]['delta']['content']
+                                        yield content
+                                except json.JSONDecodeError:
+                                    continue
+
+                return generate_stream()
+            else:
+                response_json = response.json()
+                return response_json['choices'][0]['message']['content']
+
+        except requests.exceptions.RequestException as e:
+            raise exceptions.APIConnectionError(f"Request failed: {e}")
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict, Generator]:
+        """Send a prompt to the model and get a response."""
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        messages = [
+            {"role": "system", "content": self.system_prompt},
+            {"role": "user", "content": conversation_prompt}
+        ]
+
+        try:
+            response = self._make_request(messages, stream)
+            if stream:
+                return response
+
+            self.last_response = response
+            return response
+
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"Error during request: {e}")
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator]:
+        """Chat with the model."""
+        response = self.ask(prompt, stream, optimizer, conversationally)
+        if stream:
+            return response
+        return response
+
+    def get_message(self, response: str) -> str:
+        """Retrieves message from response."""
+        return response
+
+
+if __name__ == "__main__":
+    from rich import print
+
+    # Example usage
+    cerebras = Cerebras(
+        cookie_path=r'C:\Users\koula\OneDrive\Desktop\Webscout\cookie.json',
+        model='llama3.1-8b',
+        system_prompt="You are a helpful AI assistant."
+    )
+
+    # Test with streaming
+    response = cerebras.chat("Hello!", stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
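Reading the new get_demo_api_key(), the provider authenticates by replaying browser cookies against the GetMyDemoApiKey GraphQL query, so cookie.json must be a JSON array of objects with "name" and "value" keys (the shape produced by typical cookie-export browser extensions). A minimal usage sketch, assuming a valid cookie export from a logged-in inference.cerebras.ai session; the cookie names and path shown are illustrative, not taken from the package:

# cookie.json -- a list of {"name": ..., "value": ...} objects, matching the
# dict comprehension in get_demo_api_key(); these cookie names are hypothetical:
# [
#     {"name": "session", "value": "..."},
#     {"name": "cf_clearance", "value": "..."}
# ]
from webscout.Provider.cerebras import Cerebras  # module path taken from this diff

ai = Cerebras(cookie_path="cookie.json", model="llama3.1-8b")
print(ai.chat("Ping?"))  # non-streaming: returns the reply as a plain str

Non-streaming calls return a plain string because _make_request() unwraps choices[0].message.content before handing the response back.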
webscout/Provider/mhystical.py
ADDED
@@ -0,0 +1,176 @@
+import requests
+import json
+from typing import Any, Dict, Optional, Generator, List
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+
+
+class Mhystical(Provider):
+    """
+    A class to interact with the Mhystical API. Improved to meet webscout provider standards.
+    """
+
+    AVAILABLE_MODELS = ["gpt-4", "gpt-3.5-turbo"]  # Add available models
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2048,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "gpt-4",  # Default model
+        system_prompt: str = "You are a helpful AI assistant."  # Default system prompt
+    ):
+        """Initializes the Mhystical API."""
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://api.mhystical.cc/v1/completions"
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.system_prompt = system_prompt  # Store system prompt
+        self.headers = {
+            "x-api-key": "mhystical",  # Set API key in header (or better, in __init__ from parameter)
+            "Content-Type": "application/json",
+            "accept": "*/*",
+            "user-agent": "Mozilla/5.0"
+        }
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
+
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        messages = [
+            {"role": "system", "content": self.system_prompt},  # Include system prompt
+            {"role": "user", "content": conversation_prompt},
+        ]
+
+        data = {
+            "model": self.model,  # Now using self.model
+            "messages": messages  # Pass messages to API
+        }
+
+        def for_stream():
+            try:
+                with requests.post(self.api_endpoint, headers=self.headers, json=data, stream=True, timeout=self.timeout) as response:
+                    response.raise_for_status()  # Raise exceptions for HTTP errors
+
+                    # Emulate streaming for this API
+                    full_response = ""  # Accumulate the full response
+                    for chunk in response.iter_content(decode_unicode=True, chunk_size=self.stream_chunk_size):
+                        if chunk:
+                            full_response += chunk
+                            yield chunk if raw else {"text": chunk}
+
+                    self.last_response.update({"text": full_response})
+                    self.conversation.update_chat_history(prompt, full_response)
+            except requests.exceptions.RequestException as e:
+                raise exceptions.ProviderConnectionError(f"Network error: {str(e)}")
+
+        def for_non_stream():
+            try:
+                response = self.session.post(self.api_endpoint, headers=self.headers, json=data, timeout=self.timeout)
+                response.raise_for_status()
+
+                full_response = self._parse_response(response.text)
+                self.last_response.update({"text": full_response})
+
+                # Yield the entire response as a single chunk
+                yield {"text": full_response}
+
+            except requests.exceptions.RequestException as e:
+                raise exceptions.ProviderConnectionError(f"Network error: {str(e)}")
+
+        return for_stream() if stream else for_non_stream()
+
+
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str | Generator[str, None, None]:
+
+        def for_stream():
+            for response in self.ask(
+                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            response = next(self.ask(
+                prompt, stream=False, optimizer=optimizer, conversationally=conversationally
+            ))
+            return self.get_message(response)
+        return for_stream() if stream else for_non_stream()
+
+
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+    @staticmethod
+    def _parse_response(response_text: str) -> str:
+        """Parse and validate API response."""
+        try:
+            data = json.loads(response_text)
+            return data["choices"][0]["message"]["content"].strip()
+        except (json.JSONDecodeError, KeyError, IndexError) as e:
+            raise exceptions.InvalidResponseError(f"Failed to parse response: {str(e)}")
+
+if __name__ == "__main__":
+    from rich import print
+    ai = Mhystical()
+    response = ai.chat(input(">>> "))
+    for chunk in response:
+        print(chunk, end="", flush=True)
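One caveat when reading the new provider: for_stream() iterates with chunk_size=self.stream_chunk_size, but __init__() never assigns that attribute, so a streaming call would raise AttributeError as shipped. A minimal workaround sketch, assuming the attribute is meant to be a byte count; the value 64 is an arbitrary choice, not taken from the package:

from webscout.Provider.mhystical import Mhystical  # module path taken from this diff

ai = Mhystical(model="gpt-4")
ai.stream_chunk_size = 64  # hypothetical: read in for_stream() but never set upstream

for piece in ai.chat("Hello!", stream=True):
    print(piece, end="", flush=True)

Non-streaming calls are unaffected, since for_non_stream() never touches the attribute.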