webscout-4.7-py3-none-any.whl → webscout-4.9-py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.

Files changed (53)
  1. webscout/Agents/functioncall.py +97 -37
  2. webscout/Bard.py +365 -0
  3. webscout/Bing_search.py +124 -0
  4. webscout/DWEBS.py +141 -777
  5. webscout/Local/_version.py +1 -1
  6. webscout/Provider/Andi.py +7 -1
  7. webscout/Provider/BasedGPT.py +11 -5
  8. webscout/Provider/Berlin4h.py +11 -5
  9. webscout/Provider/Blackboxai.py +10 -4
  10. webscout/Provider/Cloudflare.py +286 -0
  11. webscout/Provider/Cohere.py +11 -5
  12. webscout/Provider/DARKAI.py +25 -7
  13. webscout/Provider/Deepinfra.py +2 -1
  14. webscout/Provider/Deepseek.py +25 -9
  15. webscout/Provider/DiscordRocks.py +389 -0
  16. webscout/Provider/Farfalle.py +227 -0
  17. webscout/Provider/Gemini.py +1 -1
  18. webscout/Provider/Groq.py +244 -110
  19. webscout/Provider/Llama.py +13 -5
  20. webscout/Provider/Llama3.py +15 -2
  21. webscout/Provider/OLLAMA.py +8 -7
  22. webscout/Provider/{Geminiflash.py → PI.py} +96 -40
  23. webscout/Provider/Perplexity.py +422 -52
  24. webscout/Provider/Phind.py +6 -5
  25. webscout/Provider/PizzaGPT.py +7 -1
  26. webscout/Provider/Youchat.py +98 -76
  27. webscout/Provider/__init__.py +26 -31
  28. webscout/Provider/ai4chat.py +193 -0
  29. webscout/Provider/{VTLchat.py → felo_search.py} +62 -76
  30. webscout/Provider/julius.py +263 -0
  31. webscout/Provider/koala.py +11 -5
  32. webscout/Provider/liaobots.py +268 -0
  33. webscout/Provider/meta.py +2 -1
  34. webscout/Provider/{ChatGPTUK.py → turboseek.py} +79 -56
  35. webscout/Provider/{FreeGemini.py → xdash.py} +51 -18
  36. webscout/Provider/yep.py +258 -0
  37. webscout/__init__.py +1 -59
  38. webscout/version.py +1 -1
  39. webscout/webai.py +2 -64
  40. webscout/webscout_search.py +1 -1
  41. {webscout-4.7.dist-info → webscout-4.9.dist-info}/METADATA +249 -323
  42. webscout-4.9.dist-info/RECORD +83 -0
  43. webscout/GoogleS.py +0 -342
  44. webscout/Provider/Geminipro.py +0 -152
  45. webscout/Provider/Leo.py +0 -469
  46. webscout/Provider/OpenGPT.py +0 -867
  47. webscout/Provider/Xjai.py +0 -230
  48. webscout/Provider/Yepchat.py +0 -478
  49. webscout-4.7.dist-info/RECORD +0 -80
  50. {webscout-4.7.dist-info → webscout-4.9.dist-info}/LICENSE.md +0 -0
  51. {webscout-4.7.dist-info → webscout-4.9.dist-info}/WHEEL +0 -0
  52. {webscout-4.7.dist-info → webscout-4.9.dist-info}/entry_points.txt +0 -0
  53. {webscout-4.7.dist-info → webscout-4.9.dist-info}/top_level.txt +0 -0
webscout/Provider/{VTLchat.py → felo_search.py}
@@ -19,29 +19,20 @@ import io
  import re
  import json
  import yaml
- from ..AIutel import Optimizers
- from ..AIutel import Conversation
- from ..AIutel import AwesomePrompts, sanitize_stream
- from ..AIbase import Provider, AsyncProvider
- from Helpingai_T2 import Perplexity
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider, AsyncProvider
  from webscout import exceptions
- from typing import Any, AsyncGenerator, Dict, Optional
+ from typing import Any, AsyncGenerator, Dict
  import logging
  import httpx
- class VTLchat(Provider):
-     """
-     A class to interact with the VTLchat AI API.
-     """
 
+ class Felo(Provider):
      def __init__(
          self,
          is_conversation: bool = True,
          max_tokens: int = 600,
-         temperature: float = 0.9,
-         presence_penalty: float = 0,
-         frequency_penalty: float = 0,
-         top_p: float = 1,
-         model: str = "gpt-3.5-turbo",
          timeout: int = 30,
          intro: str = None,
          filepath: str = None,
@@ -49,19 +40,12 @@ class VTLchat(Provider):
          proxies: dict = {},
          history_offset: int = 10250,
          act: str = None,
-         system_prompt: Optional[str] = "You are ChatGPT, a large language model trained by OpenAI.\nKnowledge cutoff: 2021-09\nCurrent model: gpt-3.5-turbo\nCurrent time: 5/11/2024, 12:26:29 PM\nLatex inline: $x^2$ \nLatex block: $$e=mc^2$$"
-     ) -> None:
-         """
-         Initializes the VTLchat API with given parameters.
+     ):
+         """Instantiates Felo
 
          Args:
              is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
              max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             temperature (float, optional): Controls randomness. Default is 0.9.
-             presence_penalty (float, optional): Controls repetition. Default is 0.
-             frequency_penalty (float, optional): Controls frequency of token usage. Default is 0.
-             top_p (float, optional): Controls diversity. Default is 1.
-             model (str, optional): The AI model to use. Default is 'gpt-3.5-turbo'.
              timeout (int, optional): Http request timeout. Defaults to 30.
              intro (str, optional): Conversation introductory prompt. Defaults to None.
              filepath (str, optional): Path to file containing conversation history. Defaults to None.
@@ -69,22 +53,31 @@ class VTLchat(Provider):
              proxies (dict, optional): Http request proxies. Defaults to {}.
              history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
              act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-             system_prompt (str, optional): System prompt to prepend to the conversation history.
          """
          self.session = requests.Session()
          self.is_conversation = is_conversation
          self.max_tokens_to_sample = max_tokens
-         self.api_endpoint = "https://vtlchat-g1.vercel.app/api/openai/v1/chat/completions"
+         self.chat_endpoint = "https://api.felo.ai/search/threads"
          self.stream_chunk_size = 64
          self.timeout = timeout
          self.last_response = {}
-         self.model = model
-         self.temperature = temperature
-         self.presence_penalty = presence_penalty
-         self.frequency_penalty = frequency_penalty
-         self.top_p = top_p
-         self.system_prompt = system_prompt
-         self.headers = {"Content-Type": "application/json"}
+         self.headers = {
+             "accept": "*/*",
+             "accept-encoding": "gzip, deflate, br, zstd",
+             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+             "content-type": "application/json",
+             "cookie": "_clck=1gifk45%7C2%7Cfoa%7C0%7C1686; _clsk=1g5lv07%7C1723558310439%7C1%7C1%7Cu.clarity.ms%2Fcollect; _ga=GA1.1.877307181.1723558313; _ga_8SZPRV97HV=GS1.1.1723558313.1.1.1723558341.0.0.0; _ga_Q9Q1E734CC=GS1.1.1723558313.1.1.1723558341.0.0.0",
+             "dnt": "1",
+             "origin": "https://felo.ai",
+             "referer": "https://felo.ai/",
+             "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+             "sec-ch-ua-mobile": "?0",
+             "sec-ch-ua-platform": '"Windows"',
+             "sec-fetch-dest": "empty",
+             "sec-fetch-mode": "cors",
+             "sec-fetch-site": "same-site",
+             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0"
+         }
 
          self.__available_optimizers = (
              method
@@ -125,25 +118,7 @@ class VTLchat(Provider):
             dict : {}
          ```json
          {
-             "id": "chatcmpl-TaREJpBZsRVQFRFic1wIA7Q7XfnaD",
-             "object": "chat.completion",
-             "created": 1704623244,
-             "model": "gpt-3.5-turbo",
-             "usage": {
-                 "prompt_tokens": 0,
-                 "completion_tokens": 0,
-                 "total_tokens": 0
-             },
-             "choices": [
-                 {
-                     "message": {
-                         "role": "assistant",
-                         "content": "Hello! How can I assist you today?"
-                     },
-                     "finish_reason": "stop",
-                     "index": 0
-                 }
-             ]
+             "text" : "How may I assist you today?"
          }
          ```
          """
@@ -160,39 +135,42 @@ class VTLchat(Provider):
 
          self.session.headers.update(self.headers)
          payload = {
-             "messages": [
-                 {"role": "system", "content": self.system_prompt},
-                 {"role": "user", "content": conversation_prompt}
-             ],
-             "stream": True,
-             "model": self.model,
-             "temperature": self.temperature,
-             "presence_penalty": self.presence_penalty,
-             "frequency_penalty": self.frequency_penalty,
-             "top_p": self.top_p
+             "query": conversation_prompt,
+             "search_uuid": uuid4().hex,
+             "lang": "",
+             "agent_lang": "en",
+             "search_options": {
+                 "langcode": "en-US"
+             },
+             "search_video": True,
+             "contexts_from": "google"
          }
 
          def for_stream():
              response = self.session.post(
-                 self.api_endpoint, json=payload, stream=True, timeout=self.timeout
+                 self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
              )
              if not response.ok:
                  raise exceptions.FailedToGenerateResponseError(
                      f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                  )
 
-             streaming_response = ""
-             for line in response.iter_lines(decode_unicode=True, chunk_size=1):
-                 if line:
-                     modified_line = re.sub("data:", "", line)
-                     try:
-                         json_data = json.loads(modified_line)
-                         content = json_data['choices'][0]['delta']['content']
-                         streaming_response += content
-                         yield content if raw else dict(text=streaming_response)
-                     except:
-                         continue
-             self.last_response.update(dict(text=streaming_response))
+             streaming_text = ""
+             for value in response.iter_lines(
+                 decode_unicode=True,
+                 chunk_size=self.stream_chunk_size,
+                 delimiter="\n",
+             ):
+                 try:
+                     if bool(value) and value.startswith('data:'):
+                         data = json.loads(value[len('data:'):].strip())
+                         if data['type'] == 'a':
+                             streaming_text += data['data']['k']
+                             resp = dict(text=streaming_text)
+                             self.last_response.update(resp)
+                             yield value if raw else resp
+                 except json.decoder.JSONDecodeError:
+                     pass
              self.conversation.update_chat_history(
                  prompt, self.get_message(self.last_response)
              )
@@ -249,4 +227,12 @@ class VTLchat(Provider):
              str: Message extracted
          """
          assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
+
+         text = re.sub(r'\[\[\d+\]\]', '', response["text"])
+         return text
+ if __name__ == '__main__':
+     from rich import print
+     ai = Felo()
+     response = ai.chat(input(">>> "))
+     for chunk in response:
+         print(chunk, end="", flush=True)
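
Note: the hunks above replace VTLchat's OpenAI-style chat-completions streaming with Felo's server-sent-events format. A minimal standalone sketch of that parsing pattern follows, using only the endpoint, payload keys, and event shape visible in the diff; whether the unauthenticated endpoint still responds this way is an assumption, not something the diff guarantees:

    import json
    from uuid import uuid4

    import requests

    def felo_stream(query: str, timeout: int = 30):
        # Payload mirrors the one built in Felo.ask above.
        payload = {
            "query": query,
            "search_uuid": uuid4().hex,
            "lang": "",
            "agent_lang": "en",
            "search_options": {"langcode": "en-US"},
            "search_video": True,
            "contexts_from": "google",
        }
        with requests.post(
            "https://api.felo.ai/search/threads",
            json=payload, stream=True, timeout=timeout,
        ) as response:
            response.raise_for_status()
            text = ""
            for line in response.iter_lines(decode_unicode=True):
                # Events arrive as "data: {...}" lines; per the diff, type "a"
                # events carry an incremental text chunk in data["data"]["k"].
                if not line or not line.startswith("data:"):
                    continue
                try:
                    event = json.loads(line[len("data:"):].strip())
                except json.JSONDecodeError:
                    continue
                if event.get("type") == "a":
                    text += event["data"]["k"]
                    yield text  # cumulative text, like Felo.ask's resp["text"]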
webscout/Provider/julius.py
@@ -0,0 +1,263 @@
+ import time
+ import uuid
+ from selenium import webdriver
+ from selenium.webdriver.chrome.options import Options
+ from selenium.webdriver.common.by import By
+ from selenium.webdriver.support import expected_conditions as EC
+ from selenium.webdriver.support.ui import WebDriverWait
+ import click
+ import requests
+ from requests import get
+ from uuid import uuid4
+ from re import findall
+ from requests.exceptions import RequestException
+ from curl_cffi.requests import get, RequestsError
+ import g4f
+ from random import randint
+ from PIL import Image
+ import io
+ import re
+ import json
+ import yaml
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+ from typing import Any, AsyncGenerator, Dict
+ import logging
+ import httpx
+
+ class Julius(Provider):
+     AVAILABLE_MODELS = [
+         "Llama 3",
+         "GPT-4o",
+         "GPT-3.5",
+         "Command R",
+         "Gemini Flash",
+         "Gemini 1.5",
+     ]
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "Gemini Flash",
+     ):
+         """Instantiates Julius
+
+         Args:
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+             timeout (int, optional): Http request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): Http request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+             model (str, optional): Model to use for generating text. Defaults to "Gemini Flash".
+                 Options: "Llama 3", "GPT-4o", "GPT-3.5", "Command R", "Gemini Flash", "Gemini 1.5".
+         """
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.chat_endpoint = "https://api.julius.ai/api/chat/message"
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.headers = {
+             "accept": "*/*",
+             "accept-encoding": "gzip, deflate, br, zstd",
+             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+             "authorization": "Bearer",
+             "content-length": "206",
+             "content-type": "application/json",
+             "conversation-id": str(uuid.uuid4()),
+             "dnt": "1",
+             "interactive-charts": "true",
+             "is-demo": "temp_14aabbb1-95bc-4203-a678-596258d6fdf3",
+             "is-native": "false",
+             "orient-split": "true",
+             "origin": "https://julius.ai",
+             "platform": "undefined",
+             "priority": "u=1, i",
+             "referer": "https://julius.ai/",
+             "request-id": str(uuid.uuid4()),
+             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
+             "visitor-id": str(uuid.uuid4())
+         }
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         self.session.headers.update(self.headers)
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> dict:
+         """Chat with AI
+
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Whether to stream the response. Defaults to False.
+             raw (bool, optional): Whether to return the raw response. Defaults to False.
+             optimizer (str, optional): The name of the optimizer to use. Defaults to None.
+             conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
+
+         Returns:
+             The response from the API.
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         payload = {
+             "message": {"content": conversation_prompt, "role": "user"},
+             "provider": "default",
+             "chat_mode": "auto",
+             "client_version": "20240130",
+             "theme": "dark",
+             "new_images": None,
+             "new_attachments": None,
+             "dataframe_format": "json",
+             "selectedModels": [self.model]  # Choose the model here
+         }
+
+         def for_stream():
+             response = self.session.post(
+                 self.chat_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
+             )
+
+             if not response.ok:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason})"
+                 )
+             streaming_response = ""
+             for line in response.iter_lines(decode_unicode=True):
+                 if line:
+                     try:
+                         json_line = json.loads(line)
+                         content = json_line['content']
+                         streaming_response += content
+                         yield content if raw else dict(text=streaming_response)
+                     except:
+                         continue
+             self.last_response.update(dict(text=streaming_response))
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+
+         def for_non_stream():
+             response = self.session.post(
+                 self.chat_endpoint, json=payload, headers=self.headers, timeout=self.timeout
+             )
+
+             if not response.ok:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason})"
+                 )
+             full_content = ""
+             for line in response.text.splitlines():
+                 try:
+                     data = json.loads(line)
+                     if "content" in data:
+                         full_content += data['content']
+                 except json.JSONDecodeError:
+                     pass
+             self.last_response.update(dict(text=full_content))
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+ if __name__ == '__main__':
+     from rich import print
+     ai = Julius()
+     response = ai.chat(input(">>> "))
+     for chunk in response:
+         print(chunk, end="", flush=True)
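
Note: julius.py is entirely new in 4.9. A short usage sketch based only on the constructor and chat signatures above; it assumes webscout 4.9 is installed and that julius.ai still honors the hard-coded demo headers:

    from webscout.Provider.julius import Julius

    ai = Julius(model="GPT-4o")  # anything outside AVAILABLE_MODELS raises ValueError
    # chat(stream=True) yields plain strings extracted via get_message()
    for chunk in ai.chat("What is Python?", stream=True):
        print(chunk, end="", flush=True)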
webscout/Provider/koala.py
@@ -1,10 +1,10 @@
  import requests
  import json
  from typing import Any, Dict, Optional
- from ..AIutel import Optimizers
- from ..AIutel import Conversation
- from ..AIutel import AwesomePrompts, sanitize_stream
- from ..AIbase import Provider
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider
  from webscout import exceptions
 
  class KOALA(Provider):
@@ -236,4 +236,10 @@ class KOALA(Provider):
              str: Message extracted
          """
          assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
+         return response["text"]
+ if __name__ == '__main__':
+     from rich import print
+     ai = KOALA()
+     response = ai.chat("tell me about india")
+     for chunk in response:
+         print(chunk, end="", flush=True)
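
Note: the koala.py change is the same relative-to-absolute import migration applied across the providers in this release, plus a `__main__` smoke test. The two go together (an inference, not stated in the diff): a module using `from ..AIbase import Provider` raises "ImportError: attempted relative import with no known parent package" when executed directly, whereas the absolute form works under plain `python koala.py` as long as webscout is importable:

    # from ..AIbase import Provider      # fails when the file runs as a script
    from webscout.AIbase import Provider  # works as a script or as a module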