webscout-5.3-py3-none-any.whl → webscout-5.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of webscout has been flagged as a potentially problematic release.

Files changed (52)
  1. webscout/AIauto.py +8 -12
  2. webscout/Agents/Onlinesearcher.py +5 -5
  3. webscout/Agents/functioncall.py +123 -97
  4. webscout/Local/_version.py +2 -2
  5. webscout/Provider/Andi.py +1 -21
  6. webscout/Provider/BasedGPT.py +1 -21
  7. webscout/Provider/Blackboxai.py +1 -21
  8. webscout/Provider/Chatify.py +3 -2
  9. webscout/Provider/Cloudflare.py +1 -22
  10. webscout/Provider/Cohere.py +2 -23
  11. webscout/Provider/DARKAI.py +0 -1
  12. webscout/Provider/Deepinfra.py +2 -16
  13. webscout/Provider/EDITEE.py +3 -26
  14. webscout/Provider/Gemini.py +1 -24
  15. webscout/Provider/Groq.py +0 -2
  16. webscout/Provider/Koboldai.py +0 -21
  17. webscout/Provider/Llama.py +4 -21
  18. webscout/Provider/OLLAMA.py +0 -17
  19. webscout/Provider/Openai.py +2 -22
  20. webscout/Provider/Perplexity.py +1 -2
  21. webscout/Provider/Phind.py +3 -508
  22. webscout/Provider/Reka.py +4 -22
  23. webscout/Provider/TTI/__init__.py +2 -1
  24. webscout/Provider/TTI/aiforce.py +137 -0
  25. webscout/Provider/TTS/streamElements.py +0 -17
  26. webscout/Provider/TTS/voicepod.py +0 -1
  27. webscout/Provider/ThinkAnyAI.py +17 -78
  28. webscout/Provider/Youchat.py +3 -20
  29. webscout/Provider/__init__.py +12 -5
  30. webscout/Provider/cleeai.py +212 -0
  31. webscout/Provider/elmo.py +237 -0
  32. webscout/Provider/felo_search.py +4 -22
  33. webscout/Provider/geminiapi.py +198 -0
  34. webscout/Provider/genspark.py +222 -0
  35. webscout/Provider/julius.py +3 -20
  36. webscout/Provider/koala.py +1 -1
  37. webscout/Provider/lepton.py +194 -0
  38. webscout/Provider/turboseek.py +4 -21
  39. webscout/Provider/x0gpt.py +3 -2
  40. webscout/Provider/xdash.py +2 -22
  41. webscout/Provider/yep.py +391 -149
  42. webscout/YTdownloader.py +2 -3
  43. webscout/tempid.py +46 -2
  44. webscout/version.py +1 -1
  45. webscout/webscout_search_async.py +9 -9
  46. {webscout-5.3.dist-info → webscout-5.5.dist-info}/METADATA +39 -64
  47. webscout-5.5.dist-info/RECORD +99 -0
  48. webscout-5.3.dist-info/RECORD +0 -93
  49. {webscout-5.3.dist-info → webscout-5.5.dist-info}/LICENSE.md +0 -0
  50. {webscout-5.3.dist-info → webscout-5.5.dist-info}/WHEEL +0 -0
  51. {webscout-5.3.dist-info → webscout-5.5.dist-info}/entry_points.txt +0 -0
  52. {webscout-5.3.dist-info → webscout-5.5.dist-info}/top_level.txt +0 -0
webscout/Provider/genspark.py
@@ -0,0 +1,222 @@
+import cloudscraper
+from uuid import uuid4
+import json
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+
+
+class Genspark(Provider):
+    """
+    A class to interact with the Genspark.ai API.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ) -> None:
+        """Instantiates Genspark
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+        """
+        self.session = cloudscraper.create_scraper()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.chat_endpoint = "https://www.genspark.ai/api/search/stream"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.headers = {
+            "Accept": "*/*",
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "Content-Type": "application/json",
+            "DNT": "1",
+            "Origin": "https://www.genspark.ai",
+            "Priority": "u=1, i",
+            "Sec-CH-UA": '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": '"Windows"',
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0",
+        }
+        self.cookies = {
+            "i18n_redirected": "en-US",
+            "agree_terms": "0",
+            "session_id": uuid4().hex,
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "text" : "How may I assist you today?"
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        self.url = (
+            f"https://www.genspark.ai/api/search/stream?query={conversation_prompt}"
+        )
+
+        payload = {}
+        def for_stream():
+            response = self.session.post(
+                self.url,
+                headers=self.headers,
+                cookies=self.cookies,
+                json=payload,
+                stream=True,
+                timeout=self.timeout,
+            )
+
+            partial_response = ""
+            for line in response.iter_lines(decode_unicode=True):
+                if line:
+                    try:
+                        data = json.loads(line[5:])
+                        if (
+                            data["type"] == "result_field"
+                            and data["field_name"] == "deep_dive_result"
+                        ):
+                            deep_dive_result = data["field_value"]
+                            if "detailAnswer" in deep_dive_result:
+                                new_content = deep_dive_result["detailAnswer"][
+                                    len(partial_response) :
+                                ]
+                                partial_response = deep_dive_result["detailAnswer"]
+                                self.last_response.update(dict(text=new_content))
+                                yield new_content if raw else dict(text=new_content)
+                    except json.JSONDecodeError:
+                        print(f"Skipping invalid JSON line: {line}")
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+
+if __name__ == "__main__":
+    from rich import print
+    ai = Genspark()
+    response = ai.chat(input(">>> "))
+    for chunk in response:
+        print(chunk, end="", flush=True)
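
The new Genspark provider reads the endpoint's SSE-style stream (the line[5:] slice strips the "data:" prefix from each event line) and yields only the new suffix of each cumulative "detailAnswer" update. A minimal usage sketch, assuming the package's import paths follow the file layout listed above; the prompt string is made up:

    from webscout.Provider.genspark import Genspark

    ai = Genspark(timeout=60)
    # stream=True makes chat() return a generator of incremental text chunks
    for chunk in ai.chat("What is a Python wheel?", stream=True):
        print(chunk, end="", flush=True)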
webscout/Provider/julius.py
@@ -1,32 +1,15 @@
-import time
+
 import uuid
-from selenium import webdriver
-from selenium.webdriver.chrome.options import Options
-from selenium.webdriver.common.by import By
-from selenium.webdriver.support import expected_conditions as EC
-from selenium.webdriver.support.ui import WebDriverWait
-import click
+
 import requests
-from requests import get
-from uuid import uuid4
-from re import findall
-from requests.exceptions import RequestException
-from curl_cffi.requests import get, RequestsError
-import g4f
-from random import randint
-from PIL import Image
-import io
-import re
 import json
-import yaml
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
 from typing import Any, AsyncGenerator, Dict
-import logging
-import httpx
+
 
 class Julius(Provider):
     AVAILABLE_MODELS = [
webscout/Provider/koala.py
@@ -236,7 +236,7 @@ class KOALA(Provider):
             str: Message extracted
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
+        return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
 if __name__ == '__main__':
     from rich import print
     ai = KOALA()
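
The KOALA change above unescapes literal "\n" sequences that arrive as plain text in the API response (x0gpt.py below receives the same treatment). A small sketch of the effect on made-up response data; note that once the first replace has run, no literal "\n\n" pairs remain, so the second replace is effectively a no-op:

    raw = {"text": "Hello!\\n\\nHow can I help?"}   # literal backslash-n pairs
    formatted = raw["text"].replace('\\n', '\n')    # -> real newlines
    print(formatted)
    # Hello!
    #
    # How can I help?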
webscout/Provider/lepton.py
@@ -0,0 +1,194 @@
+import requests
+import re
+import json
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+
+class Lepton(Provider):
+    """
+    A class to interact with the Lepton.run API.
+    """
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ) -> None:
+        """Instantiates Lepton
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+        """
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://search.lepton.run/api/query"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.headers = {
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "content-type": "text/plain;charset=UTF-8",
+            "dnt": "1",
+            "origin": "https://search.lepton.run",
+            "priority": "u=1, i",
+            "referer": "https://search.lepton.run/search?q=BYSyA&rid=aqZSHQomzwBBF3fyHnrND",
+            "sec-ch-ua": '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0",
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "text" : "How may I assist you today?"
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        self.session.headers.update(self.headers)
+        payload = json.dumps({"query": conversation_prompt})
+
+        def for_non_stream():
+            response = self.session.post(
+                self.api_endpoint, data=payload, headers=self.headers, timeout=self.timeout
+            )
+            if not response.ok:
+                raise Exception(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            response_text = response.text
+            start_marker = "__LLM_RESPONSE__"
+            end_marker = "__RELATED_QUESTIONS__"
+
+            start_index = response_text.find(start_marker) + len(start_marker)
+            end_index = response_text.find(end_marker)
+
+            if start_index != -1 and end_index != -1:
+                extracted_text = response_text[start_index:end_index].strip()
+
+                # Remove citations using regular expression
+                cleaned_text = re.sub(r'\[citation:\d+\]', '', extracted_text)
+
+                self.last_response.update(dict(text=cleaned_text))
+
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+            return self.last_response
+
+        return for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        return self.get_message(
+            self.ask(
+                prompt,
+                optimizer=optimizer,
+                conversationally=conversationally,
+            )
+        )
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+if __name__ == '__main__':
+    from rich import print
+    ai = Lepton()
+    response = ai.chat("tell me about india")
+    for chunk in response:
+        print(chunk, end="", flush=True)
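
Lepton parses a single non-streaming response body by slicing between two sentinel markers and stripping citation tags. A reduced sketch of that parsing on a made-up body (one caveat in the code above: when the start marker is missing, find() returns -1 but start_index becomes -1 + len(start_marker), so the "start_index != -1" check cannot detect that case):

    import re

    body = 'intro __LLM_RESPONSE__Answer [citation:1] text__RELATED_QUESTIONS__[]'
    start = body.find("__LLM_RESPONSE__") + len("__LLM_RESPONSE__")
    end = body.find("__RELATED_QUESTIONS__")
    answer = re.sub(r'\[citation:\d+\]', '', body[start:end]).strip()
    print(answer)  # -> "Answer  text"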
webscout/Provider/turboseek.py
@@ -1,32 +1,15 @@
-import time
-import uuid
-from selenium import webdriver
-from selenium.webdriver.chrome.options import Options
-from selenium.webdriver.common.by import By
-from selenium.webdriver.support import expected_conditions as EC
-from selenium.webdriver.support.ui import WebDriverWait
-import click
 import requests
-from requests import get
-from uuid import uuid4
-from re import findall
-from requests.exceptions import RequestException
-from curl_cffi.requests import get, RequestsError
-import g4f
-from random import randint
-from PIL import Image
-import io
-import re
+
+
 import json
-import yaml
+
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
 from typing import Any, AsyncGenerator, Dict
-import logging
-import httpx
+
 
 class TurboSeek(Provider):
     """
webscout/Provider/x0gpt.py
@@ -1,7 +1,7 @@
 from typing import Any, Dict
 from uuid import uuid4
 import requests
-import json
+
 import re
 
 from webscout.AIutel import Optimizers
webscout/Provider/x0gpt.py
@@ -171,7 +171,8 @@ class X0GPT(Provider):
         Extracts the message from the API response.
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
+        formatted_text = response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
+        return formatted_text
 
 if __name__ == "__main__":
     from rich import print
webscout/Provider/xdash.py
@@ -1,32 +1,11 @@
-import time
 import uuid
-from selenium import webdriver
-from selenium.webdriver.chrome.options import Options
-from selenium.webdriver.common.by import By
-from selenium.webdriver.support import expected_conditions as EC
-from selenium.webdriver.support.ui import WebDriverWait
-import click
 import requests
-from requests import get
-from uuid import uuid4
-from re import findall
-from requests.exceptions import RequestException
-from curl_cffi.requests import get, RequestsError
-import g4f
-from random import randint
-from PIL import Image
-import io
-import re
-import json
-import yaml
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
 from typing import Any, AsyncGenerator, Dict
-import logging
-import httpx
 
 class XDASH(Provider):
     def __init__(
webscout/Provider/xdash.py
@@ -199,4 +178,5 @@ if __name__ == '__main__':
     from rich import print
     ai = XDASH()
     response = ai.chat(input(">>> "))
-    print(response)
+    for chunk in response:
+        print(chunk, end="", flush=True)
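
The XDASH __main__ change mirrors the chunked printing loop the other providers use. One subtlety: if chat() here follows the same Provider pattern shown above and stream defaults to False, it returns a plain str, and iterating a str yields one character at a time, so the loop still prints the full response, just character by character:

    response = "Hello"          # what chat() returns when stream is False
    for chunk in response:      # iterating a str yields single characters
        print(chunk, end="", flush=True)
    # prints: Hello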