webscout 6.2b0-py3-none-any.whl → 6.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIauto.py +191 -176
- webscout/AIbase.py +112 -239
- webscout/AIutel.py +488 -1130
- webscout/Agents/functioncall.py +248 -198
- webscout/Bing_search.py +250 -153
- webscout/DWEBS.py +454 -178
- webscout/Extra/__init__.py +2 -1
- webscout/Extra/autocoder/__init__.py +9 -0
- webscout/Extra/autocoder/autocoder_utiles.py +121 -0
- webscout/Extra/autocoder/rawdog.py +681 -0
- webscout/Extra/autollama.py +246 -195
- webscout/Extra/gguf.py +441 -226
- webscout/Extra/weather.py +172 -67
- webscout/LLM.py +442 -100
- webscout/Litlogger/__init__.py +681 -0
- webscout/Local/formats.py +4 -2
- webscout/Provider/Amigo.py +19 -10
- webscout/Provider/Andi.py +0 -33
- webscout/Provider/Blackboxai.py +4 -204
- webscout/Provider/DARKAI.py +1 -1
- webscout/Provider/EDITEE.py +1 -1
- webscout/Provider/Llama3.py +1 -1
- webscout/Provider/Marcus.py +137 -0
- webscout/Provider/NinjaChat.py +1 -1
- webscout/Provider/PI.py +221 -207
- webscout/Provider/Perplexity.py +598 -598
- webscout/Provider/RoboCoders.py +206 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -0
- webscout/Provider/TTI/AiForce/async_aiforce.py +257 -0
- webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -0
- webscout/Provider/TTI/Nexra/__init__.py +22 -0
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -0
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -0
- webscout/Provider/TTI/__init__.py +3 -4
- webscout/Provider/TTI/artbit/__init__.py +22 -0
- webscout/Provider/TTI/artbit/async_artbit.py +184 -0
- webscout/Provider/TTI/artbit/sync_artbit.py +176 -0
- webscout/Provider/TTI/blackbox/__init__.py +4 -0
- webscout/Provider/TTI/blackbox/async_blackbox.py +212 -0
- webscout/Provider/TTI/{blackboximage.py → blackbox/sync_blackbox.py} +199 -153
- webscout/Provider/TTI/deepinfra/__init__.py +4 -0
- webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -0
- webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -0
- webscout/Provider/TTI/huggingface/__init__.py +22 -0
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
- webscout/Provider/TTI/imgninza/__init__.py +4 -0
- webscout/Provider/TTI/imgninza/async_ninza.py +214 -0
- webscout/Provider/TTI/{imgninza.py → imgninza/sync_ninza.py} +209 -136
- webscout/Provider/TTI/talkai/__init__.py +4 -0
- webscout/Provider/TTI/talkai/async_talkai.py +229 -0
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
- webscout/Provider/__init__.py +146 -132
- webscout/Provider/askmyai.py +158 -0
- webscout/Provider/cerebras.py +227 -206
- webscout/Provider/geminiapi.py +208 -198
- webscout/Provider/llama3mitril.py +180 -0
- webscout/Provider/llmchat.py +203 -0
- webscout/Provider/mhystical.py +176 -0
- webscout/Provider/perplexitylabs.py +265 -0
- webscout/Provider/talkai.py +196 -0
- webscout/Provider/twitterclone.py +251 -244
- webscout/Provider/typegpt.py +359 -0
- webscout/__init__.py +28 -23
- webscout/__main__.py +5 -5
- webscout/cli.py +327 -347
- webscout/conversation.py +227 -0
- webscout/exceptions.py +161 -29
- webscout/litagent/__init__.py +172 -0
- webscout/litprinter/__init__.py +831 -0
- webscout/optimizers.py +270 -0
- webscout/prompt_manager.py +279 -0
- webscout/swiftcli/__init__.py +810 -0
- webscout/transcriber.py +479 -551
- webscout/update_checker.py +125 -0
- webscout/version.py +1 -1
- webscout-6.4.dist-info/LICENSE.md +211 -0
- {webscout-6.2b0.dist-info → webscout-6.4.dist-info}/METADATA +34 -55
- webscout-6.4.dist-info/RECORD +154 -0
- webscout/Provider/TTI/AIuncensored.py +0 -103
- webscout/Provider/TTI/Nexra.py +0 -120
- webscout/Provider/TTI/PollinationsAI.py +0 -138
- webscout/Provider/TTI/WebSimAI.py +0 -142
- webscout/Provider/TTI/aiforce.py +0 -160
- webscout/Provider/TTI/artbit.py +0 -141
- webscout/Provider/TTI/deepinfra.py +0 -148
- webscout/Provider/TTI/huggingface.py +0 -155
- webscout/models.py +0 -23
- webscout-6.2b0.dist-info/LICENSE.md +0 -50
- webscout-6.2b0.dist-info/RECORD +0 -118
- /webscout/{g4f.py → gpt4free.py} +0 -0
- {webscout-6.2b0.dist-info → webscout-6.4.dist-info}/WHEEL +0 -0
- {webscout-6.2b0.dist-info → webscout-6.4.dist-info}/entry_points.txt +0 -0
- {webscout-6.2b0.dist-info → webscout-6.4.dist-info}/top_level.txt +0 -0
webscout/Local/formats.py
CHANGED
@@ -291,7 +291,7 @@ llama2chat: dict[str, str | list] = {
 # https://llama.meta.com/docs/model-cards-and-prompt-formats/llama3_1/
 llama3: dict[str, str | list] = {
     "system_prefix": "<|start_header_id|>system<|end_header_id|>\n\n",
-    "system_prompt": 'You are a helpful AI assistant
+    "system_prompt": 'You are a helpful AI assistant.',
     "system_suffix": "<|eot_id|>\n",
     "user_prefix": "<|start_header_id|>user<|end_header_id|>\n\n",
     "user_suffix": "<|eot_id|>\n",
@@ -628,6 +628,8 @@ airoboros = llama2chat.copy()
 airoboros['system_prompt'] = \
     "You are a helpful, unbiased, uncensored assistant."
 tess = synthia.copy()
+helpingai2 = llama3.copy()
+helpingai2['system_prompt'] = "You are HelpingAI, an emotional AI. Always answer my questions in the HelpingAI style."
 tess['system_prompt'] = '' # Tess can use any system prompt, or none
 alpaca_strict = alpaca.copy() # Alpaca with more stopping strings
 alpaca_strict['stops'] = [
@@ -651,7 +653,7 @@ def _llama3_suffix_with_timestamp():
 
 Llama3WithTimestamps = AdvancedFormat({
     "system_prefix": "<|start_header_id|>system<|end_header_id|>\n\n",
-    "system_prompt": 'You are a helpful AI assistant
+    "system_prompt": 'You are a helpful AI assistant.',
     "system_suffix": _llama3_suffix_with_timestamp,
     "user_prefix": "<|start_header_id|>user<|end_header_id|>\n\n",
     "user_suffix": _llama3_suffix_with_timestamp,
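These format dicts are consumed as plain prefix/suffix templates around the system prompt and each chat turn. As a minimal sketch (render_prompt is a hypothetical helper, not webscout's actual renderer), a dict like llama3 or the new helpingai2 could be flattened into a prompt string like this:

def render_prompt(fmt: dict, user_message: str) -> str:
    # Concatenate the system block, then a single user turn,
    # using the same keys the diff above defines
    return (
        fmt["system_prefix"] + fmt["system_prompt"] + fmt["system_suffix"]
        + fmt["user_prefix"] + user_message + fmt["user_suffix"]
    )

helpingai2_like = {
    "system_prefix": "<|start_header_id|>system<|end_header_id|>\n\n",
    "system_prompt": "You are HelpingAI, an emotional AI. Always answer my questions in the HelpingAI style.",
    "system_suffix": "<|eot_id|>\n",
    "user_prefix": "<|start_header_id|>user<|end_header_id|>\n\n",
    "user_suffix": "<|eot_id|>\n",
}
print(render_prompt(helpingai2_like, "How are you feeling today?"))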
webscout/Provider/Amigo.py
CHANGED
@@ -16,12 +16,17 @@ class AmigoChat(Provider):
     """
 
     AVAILABLE_MODELS = [
-        "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
-        "o1-mini",
-        "claude-3-sonnet-20240229",
-        "gemini-1.5-pro",
-        "gemini-1-5-flash",
-        "o1-preview",
+        "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",  # Llama 3
+        "o1-mini",  # OpenAI O1 Mini
+        "claude-3-sonnet-20240229",  # Claude Sonnet
+        "gemini-1.5-pro",  # Gemini Pro
+        "gemini-1-5-flash",  # Gemini Flash
+        "o1-preview",  # OpenAI O1 Preview
+        "claude-3-5-sonnet-20241022",  # Claude 3.5 Sonnet
+        "Qwen/Qwen2.5-72B-Instruct-Turbo",  # Qwen 2.5
+        "gpt-4o"  # OpenAI GPT-4o
+        "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo"  # Llama 3.2
+
     ]
 
     def __init__(
@@ -29,13 +34,15 @@ class AmigoChat(Provider):
         is_conversation: bool = True,
         max_tokens: int = 600,
         timeout: int = 30,
+        temperature: float = 1,
         intro: str = None,
         filepath: str = None,
+        top_p: float = 0.95,
         update_file: bool = True,
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "
+        model: str = "Qwen/Qwen2.5-72B-Instruct-Turbo",  # Default model
         system_prompt: str = "You are a helpful and friendly AI assistant.",
     ):
         """
@@ -68,8 +75,10 @@ class AmigoChat(Provider):
         self.api_endpoint = "https://api.amigochat.io/v1/chat/completions"
         self.stream_chunk_size = 64
         self.timeout = timeout
+        self.temperature = temperature
         self.last_response = {}
         self.model = model
+        self.top_p = top_p
         self.headers = {
             "Accept": "*/*",
             "Accept-Encoding": "gzip, deflate, br, zstd",
@@ -158,11 +167,11 @@ class AmigoChat(Provider):
             ],
             "model": self.model,
             "frequency_penalty": 0,
-            "max_tokens":
+            "max_tokens": self.max_tokens_to_sample,
             "presence_penalty": 0,
             "stream": stream,
-            "temperature":
-            "top_p":
+            "temperature":self.temperature,
+            "top_p": self.top_p
         }
 
         def for_stream():
webscout/Provider/Andi.py
CHANGED
@@ -92,40 +92,7 @@ class AndiSearch(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> dict:
-        """Chat with AI
 
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict : {}
-        ```json
-        {
-            "id": "chatcmpl-TaREJpBZsRVQFRFic1wIA7Q7XfnaD",
-            "object": "chat.completion",
-            "created": 1704623244,
-            "model": "gpt-3.5-turbo",
-            "usage": {
-                "prompt_tokens": 0,
-                "completion_tokens": 0,
-                "total_tokens": 0
-            },
-            "choices": [
-                {
-                    "message": {
-                        "role": "assistant",
-                        "content": "Hello! How can I assist you today?"
-                    },
-                    "finish_reason": "stop",
-                    "index": 0
-                }
-            ]
-        }
-        ```
-        """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
webscout/Provider/Blackboxai.py
CHANGED
@@ -146,7 +146,7 @@ class BLACKBOXAI:
             for value in response.iter_lines(
                 decode_unicode=True,
                 chunk_size=self.stream_chunk_size,
-
+
             ):
                 try:
                     if bool(value):
@@ -206,206 +206,6 @@ class BLACKBOXAI:
     def get_message(self, response: dict) -> str:
         """Retrieves message only from response
 
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
-
-
-
-class AsyncBLACKBOXAI(AsyncProvider):
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 600,
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-        model: str = None,
-    ):
-        """Instantiates BLACKBOXAI
-
-        Args:
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            model (str, optional): Model name. Defaults to "Phind Model".
-        """
-        self.max_tokens_to_sample = max_tokens
-        self.is_conversation = is_conversation
-        self.chat_endpoint = "https://www.blackbox.ai/api/chat"
-        self.stream_chunk_size = 64
-        self.timeout = timeout
-        self.last_response = {}
-        self.model = model
-        self.previewToken: str = None
-        self.userId: str = ""
-        self.codeModelMode: bool = True
-        self.id: str = ""
-        self.agentMode: dict = {}
-        self.trendingAgentMode: dict = {}
-        self.isMicMode: bool = False
-
-        self.headers = {
-            "Content-Type": "application/json",
-            "User-Agent": "",
-            "Accept": "*/*",
-            "Accept-Encoding": "Identity",
-        }
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-        self.session = httpx.AsyncClient(headers=self.headers, proxies=proxies)
-
-    async def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> dict | AsyncGenerator:
-        """Chat with AI asynchronously.
-
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict|AsyncGenerator : ai content
-        ```json
-        {
-            "text" : "print('How may I help you today?')"
-        }
-        ```
-        """
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
-
-        payload = {
-            "messages": [
-                # json.loads(prev_messages),
-                {"content": conversation_prompt, "role": "user"}
-            ],
-            "id": self.id,
-            "previewToken": self.previewToken,
-            "userId": self.userId,
-            "codeModelMode": self.codeModelMode,
-            "agentMode": self.agentMode,
-            "trendingAgentMode": self.trendingAgentMode,
-            "isMicMode": self.isMicMode,
-        }
-
-        async def for_stream():
-            async with self.session.stream(
-                "POST", self.chat_endpoint, json=payload, timeout=self.timeout
-            ) as response:
-                if (
-                    not response.is_success
-                    or not response.headers.get("Content-Type")
-                    == "text/plain; charset=utf-8"
-                ):
-                    raise exceptions.FailedToGenerateResponseError(
-                        f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
-                    )
-                streaming_text = ""
-                async for value in response.aiter_lines():
-                    try:
-                        if bool(value):
-                            streaming_text += value + ("\n" if stream else "")
-                            resp = dict(text=streaming_text)
-                            self.last_response.update(resp)
-                            yield value if raw else resp
-                    except json.decoder.JSONDecodeError:
-                        pass
-            self.conversation.update_chat_history(
-                prompt, await self.get_message(self.last_response)
-            )
-
-        async def for_non_stream():
-            async for _ in for_stream():
-                pass
-            return self.last_response
-
-        return for_stream() if stream else await for_non_stream()
-
-    async def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> str | AsyncGenerator:
-        """Generate response `str` asynchronously.
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            str|AsyncGenerator: Response generated
-        """
-
-        async def for_stream():
-            async_ask = await self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
-            )
-            async for response in async_ask:
-                yield await self.get_message(response)
-
-        async def for_non_stream():
-            return await self.get_message(
-                await self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
-            )
-
-        return for_stream() if stream else await for_non_stream()
-
-    async def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
         Args:
             response (dict): Response generated by `self.ask`
 
@@ -418,10 +218,10 @@ class AsyncBLACKBOXAI(AsyncProvider):
 # Function to clean the response text
 def clean_response(response_text: str) -> str:
     # Remove web search results
-
+    cleaned_response = re.sub(r'\$~~~\$.*?\$~~~\$', '', response_text, flags=re.DOTALL)
     # Remove any remaining special characters or markers
-
-    return
+    cleaned_response = re.sub(r'\$~~~', '', cleaned_response)
+    return cleaned_response.strip()
 if __name__ == '__main__':
     from rich import print
     ai = BLACKBOXAI()
webscout/Provider/DARKAI.py
CHANGED
@@ -218,7 +218,7 @@ class DARKAI(Provider):
         return response["text"]
 if __name__ == '__main__':
     from rich import print
-    ai = DARKAI(
+    ai = DARKAI()
     response = ai.chat("tell me about india")
     for chunk in response:
         print(chunk, end="", flush=True)
webscout/Provider/EDITEE.py
CHANGED
@@ -1,6 +1,6 @@
 import cloudscraper
 from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
+from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
webscout/Provider/Llama3.py
CHANGED
@@ -183,7 +183,7 @@ class LLAMA3(Provider):
 
 if __name__ == "__main__":
     from rich import print
-    ai = LLAMA3(api_key='
+    ai = LLAMA3(api_key='')
     response = ai.chat(input(">>> "))
     for chunks in response:
         print(chunks, end="", flush=True)
webscout/Provider/Marcus.py
ADDED
@@ -0,0 +1,137 @@
+import requests
+import json
+from typing import Any, Dict, Optional, Generator
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+
+
+class Marcus(Provider):
+    """
+    This class provides methods for interacting with the AskMarcus API.
+    Improved to match webscout provider standards.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2048,  # Added max_tokens parameter
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ):
+        """Initializes the Marcus API."""
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://www.askmarcus.app/api/response"
+        self.timeout = timeout
+        self.last_response = {}
+        self.headers = {
+            'content-type': 'application/json',
+            'accept': '*/*',
+            'origin': 'https://www.askmarcus.app',
+            'referer': 'https://www.askmarcus.app/chat',
+            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
+        }
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any] | Generator[str, None, None]:
+        """Sends a prompt to the AskMarcus API and returns the response."""
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        data = {"message": conversation_prompt}
+
+        def for_stream():
+            try:
+                with requests.post(self.api_endpoint, headers=self.headers, json=data, stream=True, timeout=self.timeout) as response:
+                    response.raise_for_status()
+                    for line in response.iter_lines():
+                        if line:
+                            yield line.decode('utf-8')
+                    self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
+
+            except requests.exceptions.RequestException as e:
+                raise exceptions.ProviderConnectionError(f"Error connecting to Marcus: {str(e)}")
+
+        def for_non_stream():
+            full_response = ""
+            for line in for_stream():
+                full_response += line
+            self.last_response = {"text": full_response}
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str | Generator[str, None, None]:
+        """Generates a response from the AskMarcus API."""
+
+        def for_stream():
+            for response_chunk in self.ask(
+                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield response_chunk
+
+        def for_non_stream():
+            response = self.ask(
+                prompt, stream=False, optimizer=optimizer, conversationally=conversationally
+            )
+            return self.get_message(response)
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: Dict[str, Any]) -> str:
+        """Extracts the message from the API response."""
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response.get("text", "")
+
+if __name__ == '__main__':
+    ai = Marcus(timeout=30)
+    response = ai.chat("Tell me about India", stream=True)
+    for chunk in response:
+        print(chunk)
webscout/Provider/NinjaChat.py
CHANGED
@@ -35,7 +35,7 @@ class NinjaChat(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "
+        model: str = "llama",  # Default model
         system_message: str = "You are a helpful AI assistant.",  # Default system message
     ):
         """Initializes the NinjaChat API client."""