webscout-4.7-py3-none-any.whl → webscout-4.8-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (43)
  1. webscout/Agents/functioncall.py +97 -37
  2. webscout/Bard.py +365 -0
  3. webscout/Local/_version.py +1 -1
  4. webscout/Provider/Andi.py +7 -1
  5. webscout/Provider/BasedGPT.py +11 -5
  6. webscout/Provider/Berlin4h.py +11 -5
  7. webscout/Provider/Blackboxai.py +10 -4
  8. webscout/Provider/Cohere.py +11 -5
  9. webscout/Provider/DARKAI.py +25 -7
  10. webscout/Provider/Deepinfra.py +2 -1
  11. webscout/Provider/Deepseek.py +25 -9
  12. webscout/Provider/DiscordRocks.py +389 -0
  13. webscout/Provider/{ChatGPTUK.py → Farfalle.py} +80 -67
  14. webscout/Provider/Gemini.py +1 -1
  15. webscout/Provider/Groq.py +244 -110
  16. webscout/Provider/Llama.py +13 -5
  17. webscout/Provider/Llama3.py +15 -2
  18. webscout/Provider/OLLAMA.py +8 -7
  19. webscout/Provider/Perplexity.py +422 -52
  20. webscout/Provider/Phind.py +6 -5
  21. webscout/Provider/PizzaGPT.py +7 -1
  22. webscout/Provider/__init__.py +12 -31
  23. webscout/Provider/ai4chat.py +193 -0
  24. webscout/Provider/koala.py +11 -5
  25. webscout/Provider/{VTLchat.py → liaobots.py} +120 -104
  26. webscout/Provider/meta.py +2 -1
  27. webscout/version.py +1 -1
  28. webscout/webai.py +2 -64
  29. webscout/webscout_search.py +1 -1
  30. {webscout-4.7.dist-info → webscout-4.8.dist-info}/METADATA +227 -252
  31. {webscout-4.7.dist-info → webscout-4.8.dist-info}/RECORD +35 -40
  32. webscout/Provider/FreeGemini.py +0 -169
  33. webscout/Provider/Geminiflash.py +0 -152
  34. webscout/Provider/Geminipro.py +0 -152
  35. webscout/Provider/Leo.py +0 -469
  36. webscout/Provider/OpenGPT.py +0 -867
  37. webscout/Provider/Xjai.py +0 -230
  38. webscout/Provider/Yepchat.py +0 -478
  39. webscout/Provider/Youchat.py +0 -225
  40. {webscout-4.7.dist-info → webscout-4.8.dist-info}/LICENSE.md +0 -0
  41. {webscout-4.7.dist-info → webscout-4.8.dist-info}/WHEEL +0 -0
  42. {webscout-4.7.dist-info → webscout-4.8.dist-info}/entry_points.txt +0 -0
  43. {webscout-4.7.dist-info → webscout-4.8.dist-info}/top_level.txt +0 -0
webscout/Provider/Leo.py DELETED
@@ -1,469 +0,0 @@
- import time
- import uuid
- from selenium import webdriver
- from selenium.webdriver.chrome.options import Options
- from selenium.webdriver.common.by import By
- from selenium.webdriver.support import expected_conditions as EC
- from selenium.webdriver.support.ui import WebDriverWait
- import click
- import requests
- from requests import get
- from uuid import uuid4
- from re import findall
- from requests.exceptions import RequestException
- from curl_cffi.requests import get, RequestsError
- import g4f
- from random import randint
- from PIL import Image
- import io
- import re
- import json
- import yaml
- from ..AIutel import Optimizers
- from ..AIutel import Conversation
- from ..AIutel import AwesomePrompts, sanitize_stream
- from ..AIbase import Provider, AsyncProvider
- from Helpingai_T2 import Perplexity
- from webscout import exceptions
- from typing import Any, AsyncGenerator, Dict
- import logging
- import httpx
- #--------------------------------------LEO-----------------------------------------
- class LEO(Provider):
-
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         temperature: float = 0.2,
-         top_k: int = -1,
-         top_p: float = 0.999,
-         model: str = "llama-2-13b-chat",
-         brave_key: str = "qztbjzBqJueQZLFkwTTJrieu8Vw3789u",
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-     ):
-         """Instantiate TGPT
-
-         Args:
-             is_conversation (str, optional): Flag for chatting conversationally. Defaults to True.
-             brave_key (str, optional): Brave API access key. Defaults to "qztbjzBqJueQZLFkwTTJrieu8Vw3789u".
-             model (str, optional): Text generation model name. Defaults to "llama-2-13b-chat".
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.2.
-             top_k (int, optional): Chance of topic being repeated. Defaults to -1.
-             top_p (float, optional): Sampling threshold during inference time. Defaults to 0.999.
-             timeput (int, optional): Http requesting timeout. Defaults to 30
-             intro (str, optional): Conversation introductory prompt. Defaults to `Conversation.intro`.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional) : Http reqiuest proxies (socks). Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-         """
-         self.session = requests.Session()
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.model = model
-         self.stop_sequences = ["</response>", "</s>"]
-         self.temperature = temperature
-         self.top_k = top_k
-         self.top_p = top_p
-         self.chat_endpoint = "https://ai-chat.bsg.brave.com/v1/complete"
-         self.stream_chunk_size = 64
-         self.timeout = timeout
-         self.last_response = {}
-         self.headers = {
-             "Content-Type": "application/json",
-             "accept": "text/event-stream",
-             "x-brave-key": brave_key,
-             "accept-language": "en-US,en;q=0.9",
-             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:99.0) Gecko/20100101 Firefox/110.0",
-         }
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         self.session.headers.update(self.headers)
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session.proxies = proxies
-         self.system_prompt = (
-             "\n\nYour name is Leo, a helpful"
-             "respectful and honest AI assistant created by the company Brave. You will be replying to a user of the Brave browser. "
-             "Always respond in a neutral tone. Be polite and courteous. Answer concisely in no more than 50-80 words."
-             "\n\nPlease ensure that your responses are socially unbiased and positive in nature."
-             "If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. "
-             "If you don't know the answer to a question, please don't share false information.\n"
-         )
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> dict:
-         """Chat with AI
-
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict : {}
-         ```json
-         {
-             "completion": "\nNext: domestic cat breeds with short hair >>",
-             "stop_reason": null,
-             "truncated": false,
-             "stop": null,
-             "model": "llama-2-13b-chat",
-             "log_id": "cmpl-3kYiYxSNDvgMShSzFooz6t",
-             "exception": null
-         }
-         ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         self.session.headers.update(self.headers)
-         payload = {
-             "max_tokens_to_sample": self.max_tokens_to_sample,
-             "model": self.model,
-             "prompt": f"<s>[INST] <<SYS>>{self.system_prompt}<</SYS>>{conversation_prompt} [/INST]",
-             "self.stop_sequence": self.stop_sequences,
-             "stream": stream,
-             "top_k": self.top_k,
-             "top_p": self.top_p,
-         }
-
-         def for_stream():
-             response = self.session.post(
-                 self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
-             )
-             if (
-                 not response.ok
-                 or not response.headers.get("Content-Type")
-                 == "text/event-stream; charset=utf-8"
-             ):
-                 raise Exception(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
-
-             for value in response.iter_lines(
-                 decode_unicode=True,
-                 delimiter="" if raw else "data:",
-                 chunk_size=self.stream_chunk_size,
-             ):
-                 try:
-                     resp = json.loads(value)
-                     self.last_response.update(resp)
-                     yield value if raw else resp
-                 except json.decoder.JSONDecodeError:
-                     pass
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
-
-         def for_non_stream():
-             response = self.session.post(
-                 self.chat_endpoint, json=payload, stream=False, timeout=self.timeout
-             )
-             if (
-                 not response.ok
-                 or not response.headers.get("Content-Type", "") == "application/json"
-             ):
-                 raise Exception(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
-             resp = response.json()
-             self.last_response.update(resp)
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
-             return resp
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str:
-         """Generate response `str`
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str: Response generated
-         """
-
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield self.get_message(response)
-
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response.get("completion")
- class AsyncLEO(AsyncProvider):
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         temperature: float = 0.2,
-         top_k: int = -1,
-         top_p: float = 0.999,
-         model: str = "llama-2-13b-chat",
-         brave_key: str = "qztbjzBqJueQZLFkwTTJrieu8Vw3789u",
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-     ):
-         """Instantiate TGPT
-
-         Args:
-             is_conversation (str, optional): Flag for chatting conversationally. Defaults to True.
-             brave_key (str, optional): Brave API access key. Defaults to "qztbjzBqJueQZLFkwTTJrieu8Vw3789u".
-             model (str, optional): Text generation model name. Defaults to "llama-2-13b-chat".
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.2.
-             top_k (int, optional): Chance of topic being repeated. Defaults to -1.
-             top_p (float, optional): Sampling threshold during inference time. Defaults to 0.999.
-             timeput (int, optional): Http requesting timeout. Defaults to 30
-             intro (str, optional): Conversation introductory prompt. Defaults to `Conversation.intro`.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional) : Http reqiuest proxies (socks). Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-         """
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.model = model
-         self.stop_sequences = ["</response>", "</s>"]
-         self.temperature = temperature
-         self.top_k = top_k
-         self.top_p = top_p
-         self.chat_endpoint = "https://ai-chat.bsg.brave.com/v1/complete"
-         self.stream_chunk_size = 64
-         self.timeout = timeout
-         self.last_response = {}
-         self.headers = {
-             "Content-Type": "application/json",
-             "accept": "text/event-stream",
-             "x-brave-key": brave_key,
-             "accept-language": "en-US,en;q=0.9",
-             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:99.0) Gecko/20100101 Firefox/110.0",
-         }
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.system_prompt = (
-             "\n\nYour name is Leo, a helpful"
-             "respectful and honest AI assistant created by the company Brave. You will be replying to a user of the Brave browser. "
-             "Always respond in a neutral tone. Be polite and courteous. Answer concisely in no more than 50-80 words."
-             "\n\nPlease ensure that your responses are socially unbiased and positive in nature."
-             "If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. "
-             "If you don't know the answer to a question, please don't share false information.\n"
-         )
-         self.session = httpx.AsyncClient(headers=self.headers, proxies=proxies)
-
-     async def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> dict | AsyncGenerator:
-         """Chat with AI asynchronously.
-
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict|AsyncGenerator : ai content
-         ```json
-         {
-             "completion": "\nNext: domestic cat breeds with short hair >>",
-             "stop_reason": null,
-             "truncated": false,
-             "stop": null,
-             "model": "llama-2-13b-chat",
-             "log_id": "cmpl-3kYiYxSNDvgMShSzFooz6t",
-             "exception": null
-         }
-         ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         payload = {
-             "max_tokens_to_sample": self.max_tokens_to_sample,
-             "model": self.model,
-             "prompt": f"<s>[INST] <<SYS>>{self.system_prompt}<</SYS>>{conversation_prompt} [/INST]",
-             "self.stop_sequence": self.stop_sequences,
-             "stream": stream,
-             "top_k": self.top_k,
-             "top_p": self.top_p,
-         }
-
-         async def for_stream():
-             async with self.session.stream(
-                 "POST", self.chat_endpoint, json=payload, timeout=self.timeout
-             ) as response:
-                 if (
-                     not response.is_success
-                     or not response.headers.get("Content-Type")
-                     == "text/event-stream; charset=utf-8"
-                 ):
-                     raise exceptions.FailedToGenerateResponseError(
-                         f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
-                     )
-                 async for value in response.aiter_lines():
-                     try:
-                         resp = sanitize_stream(value)
-                         self.last_response.update(resp)
-                         yield value if raw else resp
-                     except json.decoder.JSONDecodeError:
-                         pass
-
-             self.conversation.update_chat_history(
-                 prompt, await self.get_message(self.last_response)
-             )
-
-         async def for_non_stream():
-             async for _ in for_stream():
-                 pass
-             return self.last_response
-
-         return for_stream() if stream else await for_non_stream()
-
-     async def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str | AsyncGenerator:
-         """Generate response `str` asynchronously.
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str|AsyncGenerator: Response generated
-         """
-
-         async def for_stream():
-             async_ask = await self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             )
-             async for response in async_ask:
-                 yield await self.get_message(response)
-
-         async def for_non_stream():
-             return await self.get_message(
-                 await self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else await for_non_stream()
-
-     async def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response.get("completion")
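
For context, a minimal sketch of how the removed `LEO`/`AsyncLEO` providers were typically driven, based solely on the interface visible in the deleted source above. This is hypothetical: it assumes webscout ≤ 4.7, the import path implied by the deleted file's location, and that the hard-coded Brave endpoint and key were still live at the time.

```python
# Hypothetical usage of the removed LEO provider (webscout <= 4.7).
import asyncio
from webscout.Provider.Leo import LEO, AsyncLEO  # path of the deleted module

bot = LEO(is_conversation=True, timeout=30)

# Non-streaming: chat() wraps ask() and extracts the "completion"
# field from the JSON reply via get_message().
print(bot.chat("What is the capital of France?"))

# Streaming: each yielded chunk is the "completion" text of one SSE event.
for chunk in bot.chat("Tell me about cats", stream=True):
    print(chunk, end="", flush=True)

async def main() -> None:
    abot = AsyncLEO()
    # Same contract as LEO.chat, but awaitable and backed by httpx.AsyncClient.
    print(await abot.chat("Hello!"))

asyncio.run(main())
```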