webscout-4.8-py3-none-any.whl → webscout-5.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

@@ -0,0 +1,247 @@
+import time
+import uuid
+from selenium import webdriver
+from selenium.webdriver.chrome.options import Options
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.ui import WebDriverWait
+import click
+import requests
+from requests import get
+from uuid import uuid4
+from re import findall
+from requests.exceptions import RequestException
+from curl_cffi.requests import get, RequestsError
+import g4f
+from random import randint
+from PIL import Image
+import io
+import re
+import json
+import yaml
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+from typing import Any, AsyncGenerator, Dict
+import logging
+import httpx
+import cloudscraper
+
+
+class YouChat(Provider):
+    """
+    This class provides methods for interacting with the You.com chat API in a consistent provider structure.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ):
+        """Instantiates YouChat
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+        """
+        self.session = cloudscraper.create_scraper()  # Create a Cloudscraper session
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.chat_endpoint = "https://you.com/api/streamingSearch"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.headers = {
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
+            "Accept": "text/event-stream",
+            "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "Referer": "https://you.com/search?q=hi&fromSearchBar=true&tbm=youchat",
+            "Connection": "keep-alive",
+            "DNT": "1",
+        }
+        self.cookies = {
+            "uuid_guest_backup": uuid4().hex,
+            "youchat_personalization": "true",
+            "youchat_smart_learn": "true",
+            "youpro_subscription": "false",
+            "ydc_stytch_session": uuid4().hex,
+            "ydc_stytch_session_jwt": uuid4().hex,
+            "__cf_bm": uuid4().hex,
+        }
+
+        self.__available_optimizers = tuple(
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )  # a tuple, not a bare generator, so repeated membership checks work
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+            ```json
+            {
+                "text" : "How may I assist you today?"
+            }
+            ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "q": conversation_prompt,
+            "page": 1,
+            "count": 10,
+            "safeSearch": "Moderate",
+            "mkt": "en-IN",
+            "domain": "youchat",
+            "use_personalization_extraction": "true",
+            "queryTraceId": str(uuid4()),
+            "chatId": str(uuid4()),
+            "conversationTurnId": str(uuid4()),
+            "pastChatLength": 0,
+            "isSmallMediumDevice": "true",
+            "selectedChatMode": "default",
+            "traceId": str(uuid4()),
+            "chat": "[]"
+        }
+
+        def for_stream():
+            response = self.session.get(
+                self.chat_endpoint, headers=self.headers, cookies=self.cookies, params=payload, stream=True, timeout=self.timeout
+            )
+            if not response.ok:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            streaming_text = ""
+            for value in response.iter_lines(
+                decode_unicode=True,
+                chunk_size=self.stream_chunk_size,
+                delimiter="\n",
+            ):
+                try:
+                    if bool(value) and value.startswith('data: ') and 'youChatToken' in value:
+                        data = json.loads(value[6:])
+                        token = data.get('youChatToken', '')
+                        if token:
+                            streaming_text += token
+                            resp = dict(text=streaming_text)
+                            self.last_response.update(resp)
+                            yield value if raw else resp
+                except json.decoder.JSONDecodeError:
+                    pass
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+if __name__ == '__main__':
+    from rich import print
+    ai = YouChat()
+    response = ai.chat(input(">>> "))
+    for chunk in response:
+        print(chunk, end="", flush=True)
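
For context, the file added above (presumably webscout/providers/Youchat.py, given the `from .Youchat import *` line in the `__init__.py` hunk below) follows webscout's usual Provider shape: `ask()` returns or streams `{"text": ...}` dicts and `chat()` unwraps them to plain strings. Note that each streamed chunk carries the cumulative text so far, not a delta. A minimal usage sketch, assuming webscout 5.0 installs cleanly and you.com is reachable:

```python
# Hypothetical usage of the provider added above; not part of the package diff.
from webscout.providers import YouChat

bot = YouChat(timeout=30)

# Non-streaming: returns the complete reply as a single string.
print(bot.chat("Hello there"))

# Streaming: each yielded string is the cumulative text so far,
# so print only the new suffix to avoid repeating earlier output.
shown = ""
for text in bot.chat("Hello again", stream=True):
    print(text[len(shown):], end="", flush=True)
    shown = text
```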
@@ -1,7 +1,7 @@
 # webscout/providers/__init__.py
 
 from .ThinkAnyAI import ThinkAnyAI
-
+from .PI import *
 from .Llama import LLAMA
 from .Cohere import Cohere
 from .Reka import REKA
@@ -36,6 +36,13 @@ from .RUBIKSAI import *
 from .meta import *
 from .liaobots import *
 from .DiscordRocks import *
+from .felo_search import *
+from .xdash import *
+from .julius import *
+from .Youchat import *
+from .yep import *
+from .Cloudflare import *
+from .turboseek import *
 __all__ = [
     'ThinkAnyAI',
     'Farfalle',
@@ -53,7 +60,7 @@ __all__ = [
     'AsyncBLACKBOXAI',
     'PhindSearch',
     'AsyncPhindSearch',
-
+    'Felo',
     'GEMINI',
     'Berlin4h',
     'POE',
@@ -75,4 +82,11 @@ __all__ = [
     'Meta',
     'LiaoBots',
     'DiscordRocks',
+    'PiAI',
+    'XDASH',
+    'Julius',
+    'YouChat',
+    'YEPCHAT',
+    'Cloudflare',
+    'TurboSeek',
 ]
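
Since the new modules are pulled in with wildcard imports and their classes are appended to `__all__`, the providers added in this release become importable directly from the subpackage. A hypothetical smoke test of that registration:

```python
# Hypothetical check, not part of the diff: the wildcard imports above
# should re-export each newly registered provider class.
import webscout.providers as providers

for name in ("PiAI", "Felo", "YouChat", "YEPCHAT", "Cloudflare", "TurboSeek"):
    assert hasattr(providers, name), f"{name} not exported"
print("all new providers registered")
```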
@@ -0,0 +1,238 @@
+import time
+import uuid
+from selenium import webdriver
+from selenium.webdriver.chrome.options import Options
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.ui import WebDriverWait
+import click
+import requests
+from requests import get
+from uuid import uuid4
+from re import findall
+from requests.exceptions import RequestException
+from curl_cffi.requests import get, RequestsError
+import g4f
+from random import randint
+from PIL import Image
+import io
+import re
+import json
+import yaml
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+from typing import Any, AsyncGenerator, Dict
+import logging
+import httpx
+
+class Felo(Provider):
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ):
+        """Instantiates Felo
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+        """
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.chat_endpoint = "https://api.felo.ai/search/threads"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.headers = {
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "content-type": "application/json",
+            "cookie": "_clck=1gifk45%7C2%7Cfoa%7C0%7C1686; _clsk=1g5lv07%7C1723558310439%7C1%7C1%7Cu.clarity.ms%2Fcollect; _ga=GA1.1.877307181.1723558313; _ga_8SZPRV97HV=GS1.1.1723558313.1.1.1723558341.0.0.0; _ga_Q9Q1E734CC=GS1.1.1723558313.1.1.1723558341.0.0.0",
+            "dnt": "1",
+            "origin": "https://felo.ai",
+            "referer": "https://felo.ai/",
+            "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-site",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0"
+        }
+
+        self.__available_optimizers = tuple(
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )  # a tuple, not a bare generator, so repeated membership checks work
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+            ```json
+            {
+                "text" : "How may I assist you today?"
+            }
+            ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        self.session.headers.update(self.headers)
+        payload = {
+            "query": conversation_prompt,
+            "search_uuid": uuid4().hex,
+            "lang": "",
+            "agent_lang": "en",
+            "search_options": {
+                "langcode": "en-US"
+            },
+            "search_video": True,
+            "contexts_from": "google"
+        }
+
+        def for_stream():
+            response = self.session.post(
+                self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
+            )
+            if not response.ok:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            streaming_text = ""
+            for value in response.iter_lines(
+                decode_unicode=True,
+                chunk_size=self.stream_chunk_size,
+                delimiter="\n",
+            ):
+                try:
+                    if bool(value) and value.startswith('data:'):
+                        data = json.loads(value[len('data:'):].strip())
+                        if data.get('type') == 'a':  # .get() avoids a KeyError on events lacking "type"
+                            streaming_text += data['data']['k']
+                            resp = dict(text=streaming_text)
+                            self.last_response.update(resp)
+                            yield value if raw else resp
+                except json.decoder.JSONDecodeError:
+                    pass
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+
+        text = re.sub(r'\[\[\d+\]\]', '', response["text"])
+        return text
+if __name__ == '__main__':
+    from rich import print
+    ai = Felo()
+    response = ai.chat(input(">>> "))
+    for chunk in response:
+        print(chunk, end="", flush=True)
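
The Felo endpoint streams server-sent events; the parser above keeps only events of type 'a', accumulating each text delta from `data['data']['k']`, and `get_message()` then strips Felo's inline `[[n]]` citation markers. A standalone sketch of that post-processing step, reusing the exact regex from `get_message()` on a made-up sample reply:

```python
import re

# Same citation-stripping regex as Felo.get_message() above;
# the sample string is hypothetical.
raw = "Paris is the capital of France.[[1]][[3]]"
clean = re.sub(r'\[\[\d+\]\]', '', raw)
print(clean)  # -> Paris is the capital of France.
```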