webscout 6.2b0-py3-none-any.whl → 6.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIbase.py +309 -239
- webscout/Agents/functioncall.py +248 -198
- webscout/DWEBS.py +322 -178
- webscout/Extra/gguf.py +250 -60
- webscout/Extra/weather.py +172 -67
- webscout/LLM.py +279 -100
- webscout/Local/formats.py +4 -2
- webscout/Provider/Amigo.py +19 -10
- webscout/Provider/Andi.py +0 -33
- webscout/Provider/Blackboxai.py +4 -204
- webscout/Provider/Llama3.py +1 -1
- webscout/Provider/Marcus.py +137 -0
- webscout/Provider/TTI/__init__.py +2 -1
- webscout/Provider/TTI/talkai.py +116 -0
- webscout/Provider/__init__.py +10 -3
- webscout/Provider/askmyai.py +158 -0
- webscout/Provider/cerebras.py +71 -58
- webscout/Provider/geminiapi.py +208 -198
- webscout/Provider/llama3mitril.py +181 -0
- webscout/Provider/llmchat.py +203 -0
- webscout/Provider/talkai.py +196 -0
- webscout/Provider/twitterclone.py +7 -6
- webscout/cli.py +354 -346
- webscout/version.py +1 -1
- webscout-6.3.dist-info/LICENSE.md +211 -0
- {webscout-6.2b0.dist-info → webscout-6.3.dist-info}/METADATA +11 -13
- {webscout-6.2b0.dist-info → webscout-6.3.dist-info}/RECORD +31 -25
- webscout-6.2b0.dist-info/LICENSE.md +0 -50
- /webscout/Provider/TTI/{AIuncensored.py → AIuncensoredimage.py} +0 -0
- {webscout-6.2b0.dist-info → webscout-6.3.dist-info}/WHEEL +0 -0
- {webscout-6.2b0.dist-info → webscout-6.3.dist-info}/entry_points.txt +0 -0
- {webscout-6.2b0.dist-info → webscout-6.3.dist-info}/top_level.txt +0 -0
webscout/LLM.py
CHANGED
@@ -1,100 +1,279 @@
-(previous 100-line implementation removed; only the opening `import requests` and `import base64` lines survive in the extracted diff)
+import requests
+import base64
+import json
+from typing import List, Dict, Union, Generator, Optional, Any
+
+class LLMError(Exception):
+    """Custom exception for LLM API errors"""
+    pass
+
+class LLM:
+    """A class for interacting with the DeepInfra LLM API."""
+
+    def __init__(self, model: str, system_message: str = "You are a Helpful AI."):
+        """
+        Initialize the LLM client.
+
+        Args:
+            model: The model identifier (e.g., "meta-llama/Meta-Llama-3-70B-Instruct")
+            system_message: The system message to use for the conversation
+        """
+        self.model = model
+        self.api_url = "https://api.deepinfra.com/v1/openai/chat/completions"
+        self.conversation_history = [{"role": "system", "content": system_message}]
+        self.headers = {
+            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
+            'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+            'Cache-Control': 'no-cache',
+            'Connection': 'keep-alive',
+            'Content-Type': 'application/json',
+            'Origin': 'https://deepinfra.com',
+            'Pragma': 'no-cache',
+            'Referer': 'https://deepinfra.com/',
+            'Sec-Fetch-Dest': 'empty',
+            'Sec-Fetch-Mode': 'cors',
+            'Sec-Fetch-Site': 'same-site',
+            'X-Deepinfra-Source': 'web-embed',
+            'accept': 'text/event-stream',
+            'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"'
+        }
+
+    def _prepare_payload(
+        self,
+        messages: List[Dict[str, str]],
+        stream: bool = False,
+        temperature: float = 0.7,
+        max_tokens: int = 8028,
+        stop: Optional[List[str]] = None,
+    ) -> Dict[str, Any]:
+        """Prepare the API request payload."""
+        return {
+            'model': self.model,
+            'messages': messages,
+            'temperature': temperature,
+            'max_tokens': max_tokens,
+            'stop': stop or [],
+            'stream': stream
+        }
+
+    def chat(
+        self,
+        messages: List[Dict[str, str]],
+        stream: bool = False,
+        temperature: float = 0.7,
+        max_tokens: int = 8028,
+        stop: Optional[List[str]] = None,
+    ) -> Union[str, Generator[str, None, None]]:
+        """
+        Send a chat request to the DeepInfra API.
+
+        Args:
+            messages: List of message dictionaries with 'role' and 'content'
+            stream: Whether to stream the response
+            temperature: Sampling temperature (0-1)
+            max_tokens: Maximum tokens to generate
+            stop: Optional list of stop sequences
+
+        Returns:
+            Either a string response or a generator for streaming
+
+        Raises:
+            LLMError: If the API request fails
+        """
+        payload = self._prepare_payload(messages, stream, temperature, max_tokens, stop)
+
+        try:
+            if stream:
+                return self._stream_response(payload)
+            else:
+                return self._send_request(payload)
+        except Exception as e:
+            raise LLMError(f"API request failed: {str(e)}")
+
+    def _stream_response(self, payload: Dict[str, Any]) -> Generator[str, None, None]:
+        """Stream the chat response."""
+        try:
+            with requests.post(self.api_url, json=payload, headers=self.headers, stream=True) as response:
+                response.raise_for_status()
+                for line in response.iter_lines():
+                    if line:
+                        if line.strip() == b'data: [DONE]':
+                            break
+                        if line.startswith(b'data: '):
+                            try:
+                                chunk = json.loads(line.decode('utf-8').removeprefix('data: '))
+                                if content := chunk.get('choices', [{}])[0].get('delta', {}).get('content'):
+                                    yield content
+                            except json.JSONDecodeError:
+                                continue
+        except requests.RequestException as e:
+            raise LLMError(f"Stream request failed: {str(e)}")
+
+    def _send_request(self, payload: Dict[str, Any]) -> str:
+        """Send a non-streaming chat request."""
+        try:
+            response = requests.post(self.api_url, json=payload, headers=self.headers)
+            response.raise_for_status()
+            result = response.json()
+            return result['choices'][0]['message']['content']
+        except requests.RequestException as e:
+            raise LLMError(f"Request failed: {str(e)}")
+        except (KeyError, IndexError) as e:
+            raise LLMError(f"Invalid response format: {str(e)}")
+        except json.JSONDecodeError as e:
+            raise LLMError(f"Invalid JSON response: {str(e)}")
+
+
+class VLM:
+    """A class for interacting with the DeepInfra VLM (Vision Language Model) API."""
+
+    def __init__(self, model: str, system_message: str = "You are a Helpful AI."):
+        """
+        Initialize the VLM client.
+
+        Args:
+            model: The model identifier
+            system_message: The system message to use for the conversation
+        """
+        self.model = model
+        self.api_url = "https://api.deepinfra.com/v1/openai/chat/completions"
+        self.conversation_history = [{"role": "system", "content": system_message}]
+        self.headers = {
+            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
+            'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+            'Cache-Control': 'no-cache',
+            'Connection': 'keep-alive',
+            'Content-Type': 'application/json',
+            'Origin': 'https://deepinfra.com',
+            'Pragma': 'no-cache',
+            'Referer': 'https://deepinfra.com/',
+            'Sec-Fetch-Dest': 'empty',
+            'Sec-Fetch-Mode': 'cors',
+            'Sec-Fetch-Site': 'same-site',
+            'X-Deepinfra-Source': 'web-embed',
+            'accept': 'text/event-stream',
+            'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"'
+        }
+
+    def chat(
+        self,
+        messages: List[Dict[str, Union[str, List[Dict[str, Union[str, Dict[str, str]]]]]]],
+        stream: bool = False,
+        temperature: float = 0.7,
+        max_tokens: int = 8028,
+    ) -> Union[str, Generator[str, None, None]]:
+        """
+        Send a chat request with image support to the DeepInfra API.
+
+        Args:
+            messages: List of message dictionaries that may include image data
+            stream: Whether to stream the response
+            temperature: Sampling temperature (0-1)
+            max_tokens: Maximum tokens to generate
+
+        Returns:
+            Either a string response or a generator for streaming
+
+        Raises:
+            LLMError: If the API request fails
+        """
+        payload = {
+            "model": self.model,
+            "messages": messages,
+            "stream": stream,
+            "temperature": temperature,
+            "max_tokens": max_tokens
+        }
+
+        try:
+            if stream:
+                return self._stream_response(payload)
+            else:
+                return self._send_request(payload)
+        except Exception as e:
+            raise LLMError(f"VLM API request failed: {str(e)}")
+
+    def _stream_response(self, payload: Dict[str, Any]) -> Generator[str, None, None]:
+        """Stream the VLM chat response."""
+        try:
+            with requests.post(self.api_url, json=payload, headers=self.headers, stream=True) as response:
+                response.raise_for_status()
+                for line in response.iter_lines():
+                    if line:
+                        if line.strip() == b'data: [DONE]':
+                            break
+                        if line.startswith(b'data: '):
+                            try:
+                                chunk = json.loads(line.decode('utf-8').removeprefix('data: '))
+                                if content := chunk.get('choices', [{}])[0].get('delta', {}).get('content'):
+                                    yield content
+                            except json.JSONDecodeError:
+                                continue
+        except requests.RequestException as e:
+            raise LLMError(f"VLM stream request failed: {str(e)}")
+
+    def _send_request(self, payload: Dict[str, Any]) -> str:
+        """Send a non-streaming VLM chat request."""
+        try:
+            response = requests.post(self.api_url, json=payload, headers=self.headers)
+            response.raise_for_status()
+            result = response.json()
+            return result['choices'][0]['message']['content']
+        except requests.RequestException as e:
+            raise LLMError(f"VLM request failed: {str(e)}")
+        except (KeyError, IndexError) as e:
+            raise LLMError(f"Invalid VLM response format: {str(e)}")
+        except json.JSONDecodeError as e:
+            raise LLMError(f"Invalid VLM JSON response: {str(e)}")
+
+
+def encode_image_to_base64(image_path: str) -> str:
+    """
+    Encode an image file to base64 string.
+
+    Args:
+        image_path: Path to the image file
+
+    Returns:
+        Base64 encoded string of the image
+
+    Raises:
+        IOError: If the image file cannot be read
+    """
+    try:
+        with open(image_path, "rb") as image_file:
+            return base64.b64encode(image_file.read()).decode("utf-8")
+    except IOError as e:
+        raise LLMError(f"Failed to read image file: {str(e)}")
+
+
+if __name__ == "__main__":
+    # Example usage
+    try:
+        # Initialize LLM with Llama 3 model
+        llm = LLM(model="Qwen/Qwen2.5-Coder-32B-Instruct")
+
+        # Example messages
+        messages = [
+            {"role": "user", "content": "Write a short poem about AI."}
+        ]
+
+        # Example 1: Non-streaming response
+        print("\nNon-streaming response:")
+        response = llm.chat(messages, stream=False)
+        print(response)
+
+        # Example 2: Streaming response
+        print("\nStreaming response:")
+        for chunk in llm.chat(messages, stream=True):
+            print(chunk, end='', flush=True)
+        print("\n")
+
+    except LLMError as e:
+        print(f"Error: {str(e)}")
+    except KeyboardInterrupt:
+        print("\nOperation cancelled by user")
webscout/Local/formats.py
CHANGED
@@ -291,7 +291,7 @@ llama2chat: dict[str, str | list] = {
 # https://llama.meta.com/docs/model-cards-and-prompt-formats/llama3_1/
 llama3: dict[str, str | list] = {
     "system_prefix": "<|start_header_id|>system<|end_header_id|>\n\n",
-    "system_prompt": 'You are a helpful AI assistant
+    "system_prompt": 'You are a helpful AI assistant.',
     "system_suffix": "<|eot_id|>\n",
     "user_prefix": "<|start_header_id|>user<|end_header_id|>\n\n",
     "user_suffix": "<|eot_id|>\n",
@@ -628,6 +628,8 @@ airoboros = llama2chat.copy()
 airoboros['system_prompt'] = \
     "You are a helpful, unbiased, uncensored assistant."
 tess = synthia.copy()
+helpingai2 = llama3.copy()
+helpingai2['system_prompt'] = "You are HelpingAI, an emotional AI. Always answer my questions in the HelpingAI style."
 tess['system_prompt'] = '' # Tess can use any system prompt, or none
 alpaca_strict = alpaca.copy() # Alpaca with more stopping strings
 alpaca_strict['stops'] = [
@@ -651,7 +653,7 @@ def _llama3_suffix_with_timestamp():
 
 Llama3WithTimestamps = AdvancedFormat({
     "system_prefix": "<|start_header_id|>system<|end_header_id|>\n\n",
-    "system_prompt": 'You are a helpful AI assistant
+    "system_prompt": 'You are a helpful AI assistant.',
     "system_suffix": _llama3_suffix_with_timestamp,
     "user_prefix": "<|start_header_id|>user<|end_header_id|>\n\n",
     "user_suffix": _llama3_suffix_with_timestamp,
webscout/Provider/Amigo.py
CHANGED
@@ -16,12 +16,17 @@ class AmigoChat(Provider):
     """
 
     AVAILABLE_MODELS = [
-        "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
-        "o1-mini",
-        "claude-3-sonnet-20240229",
-        "gemini-1.5-pro",
-        "gemini-1-5-flash",
-        "o1-preview",
+        "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",  # Llama 3
+        "o1-mini",  # OpenAI O1 Mini
+        "claude-3-sonnet-20240229",  # Claude Sonnet
+        "gemini-1.5-pro",  # Gemini Pro
+        "gemini-1-5-flash",  # Gemini Flash
+        "o1-preview",  # OpenAI O1 Preview
+        "claude-3-5-sonnet-20241022",  # Claude 3.5 Sonnet
+        "Qwen/Qwen2.5-72B-Instruct-Turbo",  # Qwen 2.5
+        "gpt-4o"  # OpenAI GPT-4o
+        "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo"  # Llama 3.2
+
     ]
 
     def __init__(
@@ -29,13 +34,15 @@ class AmigoChat(Provider):
         is_conversation: bool = True,
         max_tokens: int = 600,
         timeout: int = 30,
+        temperature: float = 1,
         intro: str = None,
         filepath: str = None,
+        top_p: float = 0.95,
         update_file: bool = True,
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "
+        model: str = "Qwen/Qwen2.5-72B-Instruct-Turbo",  # Default model
         system_prompt: str = "You are a helpful and friendly AI assistant.",
     ):
         """
@@ -68,8 +75,10 @@ class AmigoChat(Provider):
         self.api_endpoint = "https://api.amigochat.io/v1/chat/completions"
         self.stream_chunk_size = 64
         self.timeout = timeout
+        self.temperature = temperature
         self.last_response = {}
         self.model = model
+        self.top_p = top_p
         self.headers = {
             "Accept": "*/*",
             "Accept-Encoding": "gzip, deflate, br, zstd",
@@ -158,11 +167,11 @@ class AmigoChat(Provider):
             ],
             "model": self.model,
             "frequency_penalty": 0,
-            "max_tokens":
+            "max_tokens": self.max_tokens_to_sample,
            "presence_penalty": 0,
             "stream": stream,
-            "temperature":
-            "top_p":
+            "temperature":self.temperature,
+            "top_p": self.top_p
         }
 
         def for_stream():
webscout/Provider/Andi.py
CHANGED
@@ -92,40 +92,7 @@ class AndiSearch(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> dict:
-        """Chat with AI
 
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict : {}
-        ```json
-        {
-            "id": "chatcmpl-TaREJpBZsRVQFRFic1wIA7Q7XfnaD",
-            "object": "chat.completion",
-            "created": 1704623244,
-            "model": "gpt-3.5-turbo",
-            "usage": {
-                "prompt_tokens": 0,
-                "completion_tokens": 0,
-                "total_tokens": 0
-            },
-            "choices": [
-                {
-                    "message": {
-                        "role": "assistant",
-                        "content": "Hello! How can I assist you today?"
-                    },
-                    "finish_reason": "stop",
-                    "index": 0
-                }
-            ]
-        }
-        ```
-        """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers: