webscout-4.7-py3-none-any.whl → webscout-4.9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (53)
  1. webscout/Agents/functioncall.py +97 -37
  2. webscout/Bard.py +365 -0
  3. webscout/Bing_search.py +124 -0
  4. webscout/DWEBS.py +141 -777
  5. webscout/Local/_version.py +1 -1
  6. webscout/Provider/Andi.py +7 -1
  7. webscout/Provider/BasedGPT.py +11 -5
  8. webscout/Provider/Berlin4h.py +11 -5
  9. webscout/Provider/Blackboxai.py +10 -4
  10. webscout/Provider/Cloudflare.py +286 -0
  11. webscout/Provider/Cohere.py +11 -5
  12. webscout/Provider/DARKAI.py +25 -7
  13. webscout/Provider/Deepinfra.py +2 -1
  14. webscout/Provider/Deepseek.py +25 -9
  15. webscout/Provider/DiscordRocks.py +389 -0
  16. webscout/Provider/Farfalle.py +227 -0
  17. webscout/Provider/Gemini.py +1 -1
  18. webscout/Provider/Groq.py +244 -110
  19. webscout/Provider/Llama.py +13 -5
  20. webscout/Provider/Llama3.py +15 -2
  21. webscout/Provider/OLLAMA.py +8 -7
  22. webscout/Provider/{Geminiflash.py → PI.py} +96 -40
  23. webscout/Provider/Perplexity.py +422 -52
  24. webscout/Provider/Phind.py +6 -5
  25. webscout/Provider/PizzaGPT.py +7 -1
  26. webscout/Provider/Youchat.py +98 -76
  27. webscout/Provider/__init__.py +26 -31
  28. webscout/Provider/ai4chat.py +193 -0
  29. webscout/Provider/{VTLchat.py → felo_search.py} +62 -76
  30. webscout/Provider/julius.py +263 -0
  31. webscout/Provider/koala.py +11 -5
  32. webscout/Provider/liaobots.py +268 -0
  33. webscout/Provider/meta.py +2 -1
  34. webscout/Provider/{ChatGPTUK.py → turboseek.py} +79 -56
  35. webscout/Provider/{FreeGemini.py → xdash.py} +51 -18
  36. webscout/Provider/yep.py +258 -0
  37. webscout/__init__.py +1 -59
  38. webscout/version.py +1 -1
  39. webscout/webai.py +2 -64
  40. webscout/webscout_search.py +1 -1
  41. {webscout-4.7.dist-info → webscout-4.9.dist-info}/METADATA +249 -323
  42. webscout-4.9.dist-info/RECORD +83 -0
  43. webscout/GoogleS.py +0 -342
  44. webscout/Provider/Geminipro.py +0 -152
  45. webscout/Provider/Leo.py +0 -469
  46. webscout/Provider/OpenGPT.py +0 -867
  47. webscout/Provider/Xjai.py +0 -230
  48. webscout/Provider/Yepchat.py +0 -478
  49. webscout-4.7.dist-info/RECORD +0 -80
  50. {webscout-4.7.dist-info → webscout-4.9.dist-info}/LICENSE.md +0 -0
  51. {webscout-4.7.dist-info → webscout-4.9.dist-info}/WHEEL +0 -0
  52. {webscout-4.7.dist-info → webscout-4.9.dist-info}/entry_points.txt +0 -0
  53. {webscout-4.7.dist-info → webscout-4.9.dist-info}/top_level.txt +0 -0
@@ -13,10 +13,10 @@ import io
13
13
  import re
14
14
  import json
15
15
  import yaml
16
- from ..AIutel import Optimizers
17
- from ..AIutel import Conversation
18
- from ..AIutel import AwesomePrompts, sanitize_stream
19
- from ..AIbase import Provider, AsyncProvider
16
+ from webscout.AIutel import Optimizers
17
+ from webscout.AIutel import Conversation
18
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
19
+ from webscout.AIbase import Provider, AsyncProvider
20
20
  from webscout import exceptions
21
21
  from typing import Any, AsyncGenerator, Dict
22
22
  import logging
@@ -182,6 +182,7 @@ class OLLAMA(Provider):
182
182
  assert isinstance(response, dict), "Response should be of dict data-type only"
183
183
  return response["text"]
184
184
  if __name__ == "__main__":
185
- ollama_provider = OLLAMA(model="qwen2:0.5b")
186
- response = ollama_provider.chat("What is the meaning of life?")
187
- print(response)
185
+ ollama_provider = OLLAMA(model="qwen:0.5b")
186
+ response = ollama_provider.chat("hi")
187
+ for r in response:
188
+ print(r, end="", flush=True)
@@ -1,13 +1,16 @@
1
+ import cloudscraper
2
+ import json
3
+
1
4
  import requests
2
- from ..AIbase import Provider
3
- from ..AIutel import Conversation
4
- from ..AIutel import Optimizers
5
- from ..AIutel import AwesomePrompts
6
- from webscout import exceptions
5
+ from webscout.AIutel import Optimizers
6
+ from webscout.AIutel import Conversation
7
+ from webscout.AIutel import AwesomePrompts
8
+ from webscout.AIbase import Provider
7
9
 
8
- class GEMINIFLASH(Provider):
10
+ class PiAI(Provider):
9
11
  def __init__(
10
12
  self,
13
+ conversation_id: str,
11
14
  is_conversation: bool = True,
12
15
  max_tokens: int = 600,
13
16
  timeout: int = 30,
@@ -18,9 +21,10 @@ class GEMINIFLASH(Provider):
18
21
  history_offset: int = 10250,
19
22
  act: str = None,
20
23
  ):
21
- """Initializes GEMINI
24
+ """Instantiates PiAI
22
25
 
23
26
  Args:
27
+ conversation_id (str): The conversation ID for the Pi.ai chat.
24
28
  is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
25
29
  max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
26
30
  timeout (int, optional): Http request timeout. Defaults to 30.
@@ -31,17 +35,44 @@ class GEMINIFLASH(Provider):
31
35
  history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
32
36
  act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
33
37
  """
38
+ self.conversation_id = conversation_id
39
+ self.scraper = cloudscraper.create_scraper()
40
+ self.url = 'https://pi.ai/api/chat'
41
+ self.headers = {
42
+ 'Accept': 'text/event-stream',
43
+ 'Accept-Encoding': 'gzip, deflate, br, zstd',
44
+ 'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
45
+ 'Content-Type': 'application/json',
46
+ 'DNT': '1',
47
+ 'Origin': 'https://pi.ai',
48
+ 'Referer': 'https://pi.ai/talk',
49
+ 'Sec-CH-UA': '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
50
+ 'Sec-CH-UA-Mobile': '?0',
51
+ 'Sec-CH-UA-Platform': '"Windows"',
52
+ 'Sec-Fetch-Dest': 'empty',
53
+ 'Sec-Fetch-Mode': 'cors',
54
+ 'Sec-Fetch-Site': 'same-origin',
55
+ 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0',
56
+ 'X-Api-Version': '3'
57
+ }
58
+ self.cookies = {
59
+ '__Host-session': 'Ca5SoyAMJEaaB79jj1T69',
60
+ '__cf_bm': 'g07oaL0jcstNfKDyZv7_YFjN0jnuBZjbMiXOWhy7V7A-1723536536-1.0.1.1-xwukd03L7oIAUqPG.OHbFNatDdHGZ28mRGsbsqfjBlpuy.b8w6UZIk8F3knMhhtNzwo4JQhBVdtYOlG0MvAw8A'
61
+ }
62
+
34
63
  self.session = requests.Session()
35
64
  self.is_conversation = is_conversation
36
65
  self.max_tokens_to_sample = max_tokens
37
- self.chat_endpoint = "https://gemini-flash.developer-house.workers.dev"
66
+ self.stream_chunk_size = 64
38
67
  self.timeout = timeout
39
68
  self.last_response = {}
69
+
40
70
  self.__available_optimizers = (
41
71
  method
42
72
  for method in dir(Optimizers)
43
73
  if callable(getattr(Optimizers, method)) and not method.startswith("__")
44
74
  )
75
+ self.session.headers.update(self.headers)
45
76
  Conversation.intro = (
46
77
  AwesomePrompts().get_act(
47
78
  act, raise_not_found=True, default=None, case_insensitive=True
@@ -75,7 +106,7 @@ class GEMINIFLASH(Provider):
75
106
  dict : {}
76
107
  ```json
77
108
  {
78
- "text" : "How may I assist you today?"
109
+ "text" : "How may I assist you today?"
79
110
  }
80
111
  ```
81
112
  """
@@ -89,28 +120,38 @@ class GEMINIFLASH(Provider):
89
120
  raise Exception(
90
121
  f"Optimizer is not one of {self.__available_optimizers}"
91
122
  )
92
- self.session.headers.update(
93
- {
94
- "Content-Type": "application/json",
95
- "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36",
96
- }
97
- )
98
- payload = {"question": conversation_prompt}
99
123
 
100
- response = self.session.get(
101
- self.chat_endpoint, params=payload, stream=True, timeout=self.timeout
102
- )
103
- if not response.ok:
104
- raise exceptions.FailedToGenerateResponseError(
105
- f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
124
+ data = {
125
+ 'text': conversation_prompt,
126
+ 'conversation': self.conversation_id
127
+ }
128
+
129
+ def for_stream():
130
+ response = self.scraper.post(self.url, headers=self.headers, cookies=self.cookies, json=data, stream=True, timeout=self.timeout)
131
+
132
+ streaming_text = ""
133
+ for line in response.iter_lines(decode_unicode=True):
134
+ if line.startswith("data: "):
135
+ json_data = line[6:]
136
+ try:
137
+ parsed_data = json.loads(json_data)
138
+ if 'text' in parsed_data:
139
+ streaming_text += parsed_data['text']
140
+ resp = dict(text=streaming_text)
141
+ self.last_response.update(resp)
142
+ yield parsed_data if raw else resp
143
+ except json.JSONDecodeError:
144
+ continue
145
+ self.conversation.update_chat_history(
146
+ prompt, self.get_message(self.last_response)
106
147
  )
107
-
108
- resp = response.json()
109
- self.last_response.update(resp)
110
- self.conversation.update_chat_history(
111
- prompt, self.get_message(self.last_response)
112
- )
113
- return resp
148
+
149
+ def for_non_stream():
150
+ for _ in for_stream():
151
+ pass
152
+ return self.last_response
153
+
154
+ return for_stream() if stream else for_non_stream()
114
155
 
115
156
  def chat(
116
157
  self,
@@ -128,13 +169,24 @@ class GEMINIFLASH(Provider):
128
169
  Returns:
129
170
  str: Response generated
130
171
  """
131
- return self.get_message(
132
- self.ask(
133
- prompt,
134
- optimizer=optimizer,
135
- conversationally=conversationally,
136
- )
137
- )
172
+
173
+ def for_stream():
174
+ for response in self.ask(
175
+ prompt, True, optimizer=optimizer, conversationally=conversationally
176
+ ):
177
+ yield self.get_message(response).encode('utf-8').decode('utf-8')
178
+
179
+ def for_non_stream():
180
+ return self.get_message(
181
+ self.ask(
182
+ prompt,
183
+ False,
184
+ optimizer=optimizer,
185
+ conversationally=conversationally,
186
+ )
187
+ ).encode('utf-8').decode('utf-8')
188
+
189
+ return for_stream() if stream else for_non_stream()
138
190
 
139
191
  def get_message(self, response: dict) -> str:
140
192
  """Retrieves message only from response
@@ -146,7 +198,11 @@ class GEMINIFLASH(Provider):
146
198
  str: Message extracted
147
199
  """
148
200
  assert isinstance(response, dict), "Response should be of dict data-type only"
149
- try:
150
- return response["content"]
151
- except KeyError:
152
- return ""
201
+ return response["text"]
202
+
203
+ if __name__ == '__main__':
204
+ from rich import print
205
+ ai = PiAI(conversation_id="6kti6HPbUKKWUAEpeD7vQ")
206
+ response = ai.chat(input(">>> "))
207
+ for chunk in response:
208
+ print(chunk, end="", flush=True)