webscout 4.7-py3-none-any.whl → 4.9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (53)
  1. webscout/Agents/functioncall.py +97 -37
  2. webscout/Bard.py +365 -0
  3. webscout/Bing_search.py +124 -0
  4. webscout/DWEBS.py +141 -777
  5. webscout/Local/_version.py +1 -1
  6. webscout/Provider/Andi.py +7 -1
  7. webscout/Provider/BasedGPT.py +11 -5
  8. webscout/Provider/Berlin4h.py +11 -5
  9. webscout/Provider/Blackboxai.py +10 -4
  10. webscout/Provider/Cloudflare.py +286 -0
  11. webscout/Provider/Cohere.py +11 -5
  12. webscout/Provider/DARKAI.py +25 -7
  13. webscout/Provider/Deepinfra.py +2 -1
  14. webscout/Provider/Deepseek.py +25 -9
  15. webscout/Provider/DiscordRocks.py +389 -0
  16. webscout/Provider/Farfalle.py +227 -0
  17. webscout/Provider/Gemini.py +1 -1
  18. webscout/Provider/Groq.py +244 -110
  19. webscout/Provider/Llama.py +13 -5
  20. webscout/Provider/Llama3.py +15 -2
  21. webscout/Provider/OLLAMA.py +8 -7
  22. webscout/Provider/{Geminiflash.py → PI.py} +96 -40
  23. webscout/Provider/Perplexity.py +422 -52
  24. webscout/Provider/Phind.py +6 -5
  25. webscout/Provider/PizzaGPT.py +7 -1
  26. webscout/Provider/Youchat.py +98 -76
  27. webscout/Provider/__init__.py +26 -31
  28. webscout/Provider/ai4chat.py +193 -0
  29. webscout/Provider/{VTLchat.py → felo_search.py} +62 -76
  30. webscout/Provider/julius.py +263 -0
  31. webscout/Provider/koala.py +11 -5
  32. webscout/Provider/liaobots.py +268 -0
  33. webscout/Provider/meta.py +2 -1
  34. webscout/Provider/{ChatGPTUK.py → turboseek.py} +79 -56
  35. webscout/Provider/{FreeGemini.py → xdash.py} +51 -18
  36. webscout/Provider/yep.py +258 -0
  37. webscout/__init__.py +1 -59
  38. webscout/version.py +1 -1
  39. webscout/webai.py +2 -64
  40. webscout/webscout_search.py +1 -1
  41. {webscout-4.7.dist-info → webscout-4.9.dist-info}/METADATA +249 -323
  42. webscout-4.9.dist-info/RECORD +83 -0
  43. webscout/GoogleS.py +0 -342
  44. webscout/Provider/Geminipro.py +0 -152
  45. webscout/Provider/Leo.py +0 -469
  46. webscout/Provider/OpenGPT.py +0 -867
  47. webscout/Provider/Xjai.py +0 -230
  48. webscout/Provider/Yepchat.py +0 -478
  49. webscout-4.7.dist-info/RECORD +0 -80
  50. {webscout-4.7.dist-info → webscout-4.9.dist-info}/LICENSE.md +0 -0
  51. {webscout-4.7.dist-info → webscout-4.9.dist-info}/WHEEL +0 -0
  52. {webscout-4.7.dist-info → webscout-4.9.dist-info}/entry_points.txt +0 -0
  53. {webscout-4.7.dist-info → webscout-4.9.dist-info}/top_level.txt +0 -0
webscout/Provider/yep.py ADDED
@@ -0,0 +1,258 @@
+ import time
+ import uuid
+ from selenium import webdriver
+ from selenium.webdriver.chrome.options import Options
+ from selenium.webdriver.common.by import By
+ from selenium.webdriver.support import expected_conditions as EC
+ from selenium.webdriver.support.ui import WebDriverWait
+ import click
+ import requests
+ from requests import get
+ from uuid import uuid4
+ from re import findall
+ from requests.exceptions import RequestException
+ from curl_cffi.requests import get, RequestsError
+ import g4f
+ from random import randint
+ from PIL import Image
+ import io
+ import re
+ import json
+ import yaml
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+ from typing import Any, AsyncGenerator, Dict
+ import logging
+ import httpx
+ import cloudscraper
+
+ class YEPCHAT(Provider):
+     """
+     This class provides methods for interacting with the Yep.com chat API in a consistent provider structure.
+     """
+
+     AVAILABLE_MODELS = [
+         "Mixtral-8x7B-Instruct-v0.1"  # Add other available models here
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 1280,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "Mixtral-8x7B-Instruct-v0.1",
+         temperature: float = 0.6,
+         top_p: float = 0.7,
+     ):
+         """Instantiates YEPCHAT
+
+         Args:
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 1280.
+             timeout (int, optional): Http request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): Http request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+             model (str, optional): Model to use for generating text. Defaults to "Mixtral-8x7B-Instruct-v0.1".
+             temperature (float, optional): Temperature parameter for the model. Defaults to 0.6.
+             top_p (float, optional): Top_p parameter for the model. Defaults to 0.7.
+         """
+
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.session = cloudscraper.create_scraper()  # Create a Cloudscraper session
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.chat_endpoint = "https://api.yep.com/v1/chat/completions"
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.temperature = temperature
+         self.top_p = top_p
+         self.headers = {
+             'Accept': '*/*',
+             'Accept-Encoding': 'gzip, deflate, br, zstd',
+             'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
+             'Content-Type': 'application/json; charset=utf-8',
+             'DNT': '1',
+             'Origin': 'https://yep.com',
+             'Referer': 'https://yep.com/',
+             'Sec-CH-UA': '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+             'Sec-CH-UA-Mobile': '?0',
+             'Sec-CH-UA-Platform': '"Windows"',
+             'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0'
+         }
+         self.cookies = {
+             '__Host-session': uuid4().hex,
+         }
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> dict:
+         """Chat with AI
+
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Whether to stream the response. Defaults to False.
+             raw (bool, optional): Whether to return the raw response. Defaults to False.
+             optimizer (str, optional): The name of the optimizer to use. Defaults to None.
+             conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
+
+         Returns:
+             The response from the API.
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         data = {
+             "stream": stream,
+             "max_tokens": self.max_tokens_to_sample,
+             "top_p": self.top_p,
+             "temperature": self.temperature,
+             "messages": [
+                 {"content": conversation_prompt, "role": "user"}
+             ],
+             "model": self.model
+         }
+
+         def for_stream():
+             response = self.session.post(
+                 self.chat_endpoint, headers=self.headers, cookies=self.cookies, json=data, stream=True, timeout=self.timeout
+             )
+             if not response.ok:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason})"
+                 )
+             streaming_response = ""
+             for line in response.iter_lines(decode_unicode=True):
+                 if line:
+                     if line.startswith("data: "):
+                         json_data = line[6:]
+                         if json_data == "[DONE]":
+                             break
+                         try:
+                             data = json.loads(json_data)
+                             content = data["choices"][0]["delta"].get("content", "")
+                             streaming_response += content
+                             yield content if raw else dict(text=streaming_response)
+                         except json.decoder.JSONDecodeError:
+                             continue
+             self.last_response.update(dict(text=streaming_response))
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+
+         def for_non_stream():
+             response = self.session.post(
+                 self.chat_endpoint, headers=self.headers, cookies=self.cookies, json=data, timeout=self.timeout
+             )
+             if not response.ok:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason})"
+                 )
+             resp = response.json()
+             self.last_response.update(dict(text=resp['choices'][0]['message']['content']))
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+ if __name__ == '__main__':
+     from rich import print
+     ai = YEPCHAT()
+     response = ai.chat("tell me about india")
+     for chunk in response:
+         print(chunk, end="", flush=True)
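
For orientation, a minimal usage sketch of the YEPCHAT provider added above. This example is not part of the release: it assumes YEPCHAT remains re-exported from the package root (as the webai.py diff below suggests) and only exercises the constructor and chat() paths visible in the file.

    from webscout import YEPCHAT  # assumes the root re-export is in place

    ai = YEPCHAT(model="Mixtral-8x7B-Instruct-v0.1", timeout=30)

    # Non-streaming: chat() returns the full reply as a string.
    print(ai.chat("What can you tell me about Yep.com?"))

    # Streaming: chat(stream=True) yields the text accumulated so far after each
    # server-sent event, mirroring the __main__ block at the end of the file.
    for chunk in ai.chat("tell me about india", stream=True):
        print(chunk, end="", flush=True)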
webscout/__init__.py CHANGED
@@ -7,7 +7,7 @@ from .voice import play_audio
  from .websx_search import WEBSX
  from .LLM import VLM, LLM
  from .YTdownloader import *
-
+ from .Bing_search import *
  import g4f
  from .YTdownloader import *
  from .Provider import *
@@ -55,64 +55,6 @@ gpt4free_providers = [
 
  available_providers = webai + gpt4free_providers
 
- # Add all the provider classes, Localai models, Thread, and Model to __all__
- __all__ = [
-     "WEBS",
-     "AsyncWEBS",
-     "__version__",
-     "DWEBS",
-     "transcriber",
-     "play_audio",
-     "TempMailClient",
-     "TemporaryPhoneNumber",
-     "LLM",
-     "YTdownloader",
-     "WEBSX",
-     "VLM",
-     # Localai models and utilities
-     # "Model",
-     # "Thread",
-     # "formats",
-
-     # AI Providers
-     'ThinkAnyAI',
-     'Xjai',
-     'LLAMA2',
-     'AsyncLLAMA2',
-     'Cohere',
-     'REKA',
-     'GROQ',
-     'AsyncGROQ',
-     'OPENAI',
-     'AsyncOPENAI',
-     'LEO',
-     'AsyncLEO',
-     'KOBOLDAI',
-     'AsyncKOBOLDAI',
-     'OPENGPT',
-     'AsyncOPENGPT',
-     'PERPLEXITY',
-     'BLACKBOXAI',
-     'AsyncBLACKBOXAI',
-     'PhindSearch',
-     'AsyncPhindSearch',
-     'YEPCHAT',
-     'AsyncYEPCHAT',
-     'YouChat',
-     'GEMINI',
-     'Berlin4h',
-     'ChatGPTUK',
-     'POE',
-     'BasedGPT',
-     'DeepSeek',
-     'DeepInfra',
-     'VLM',
-     'AsyncDeepInfra',
-     'VTLchat',
-     'AsyncPhindv2',
-     'Phindv2',
-     'OPENGPTv2',
- ]
 
  import logging
  logging.getLogger("webscout").addHandler(logging.NullHandler())
webscout/version.py CHANGED
@@ -1,2 +1,2 @@
- __version__ = "4.6"
+ __version__ = "4.8"
  __prog__ = "webscout"
webscout/webai.py CHANGED
@@ -432,26 +432,6 @@ class Main(cmd.Cmd):
                  intro=intro,
                  act=awesome_prompt,
              )
-         elif provider == "leo":
-             from webscout import LEO
-
-             self.bot = LEO(
-                 is_conversation=disable_conversation,
-                 max_tokens=max_tokens,
-                 temperature=temperature,
-                 top_k=top_k,
-                 top_p=top_p,
-                 model=getOr(model, "llama-2-13b-chat"),
-                 brave_key=getOr(auth, "qztbjzBqJueQZLFkwTTJrieu8Vw3789u"),
-                 timeout=timeout,
-                 intro=intro,
-                 filepath=filepath,
-                 update_file=update_file,
-                 proxies=proxies,
-                 history_offset=history_offset,
-                 act=awesome_prompt,
-             )
-
          elif provider == "openai":
              assert auth, (
                  "OpenAI's API-key is required. " "Use the flag `--key` or `-k`"
@@ -546,20 +526,6 @@ class Main(cmd.Cmd):
                  history_offset=history_offset,
                  act=awesome_prompt,
              )
-         elif provider == "chatgptuk":
-             from webscout import ChatGPTUK
-
-             self.bot = ChatGPTUK(
-                 is_conversation=disable_conversation,
-                 max_tokens=max_tokens,
-                 timeout=timeout,
-                 intro=intro,
-                 filepath=filepath,
-                 update_file=update_file,
-                 proxies=proxies,
-                 history_offset=history_offset,
-                 act=awesome_prompt,
-             )
          elif provider == "yepchat":
              from webscout import YEPCHAT
 
@@ -705,34 +671,6 @@ class Main(cmd.Cmd):
                  history_offset=history_offset,
                  act=awesome_prompt,
              )
-         elif provider == "geminiflash":
-             from webscout import GEMINIFLASH
-
-             self.bot = GEMINIFLASH(
-                 is_conversation=disable_conversation,
-                 max_tokens=max_tokens,
-                 timeout=timeout,
-                 intro=intro,
-                 filepath=filepath,
-                 update_file=update_file,
-                 proxies=proxies,
-                 history_offset=history_offset,
-                 act=awesome_prompt,
-             )
-         elif provider == "geminipro":
-             from webscout import GEMINIPRO
-
-             self.bot = GEMINIPRO(
-                 is_conversation=disable_conversation,
-                 max_tokens=max_tokens,
-                 timeout=timeout,
-                 intro=intro,
-                 filepath=filepath,
-                 update_file=update_file,
-                 proxies=proxies,
-                 history_offset=history_offset,
-                 act=awesome_prompt,
-             )
 
          elif provider == "vtlchat":
              from webscout import VTLchat
@@ -846,9 +784,9 @@ class Main(cmd.Cmd):
 
 
          elif provider == "perplexity":
-             from webscout import PERPLEXITY
+             from webscout import Perplexity
 
-             self.bot = PERPLEXITY(
+             self.bot = Perplexity(
                  is_conversation=disable_conversation,
                  max_tokens=max_tokens,
                  timeout=timeout,
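
The webai.py changes above are mechanical: the retired leo, chatgptuk, geminiflash, and geminipro branches are dropped, and the Perplexity branch tracks the class rename from PERPLEXITY to Perplexity. A small sketch of the renamed import path, hedged on the constructor keeping the keyword arguments shown in the diff context:

    from webscout import Perplexity  # renamed from PERPLEXITY in this release

    # Keyword arguments mirror those passed by webai.py above.
    bot = Perplexity(is_conversation=True, max_tokens=600, timeout=30)
    print(bot.chat("Hello"))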
webscout/webscout_search.py CHANGED
@@ -140,7 +140,7 @@ class WEBS:
          models = {
              "claude-3-haiku": "claude-3-haiku-20240307",
              "gpt-3.5": "gpt-3.5-turbo-0125",
-             "llama-3-70b": "meta-llama/Llama-3-70b-chat-hf",
+             "llama-3-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
              "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
              "gpt-4o-mini": "gpt-4o-mini",
          }
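
The hunk above (attributed to webscout/webscout_search.py, per entry 40 in the file list) only remaps the "llama-3-70b" alias in the WEBS models table to the newer Meta-Llama-3.1 checkpoint. A hedged sketch of how that alias is typically consumed, assuming WEBS.chat() still accepts the short alias as its model argument as in earlier releases:

    from webscout import WEBS

    # The short alias is looked up in the models table and translated to the full
    # backend identifier ("meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" after this change).
    with WEBS() as webs:
        print(webs.chat("Summarize this release in one sentence.", model="llama-3-70b"))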