webscout-1.4.0-py3-none-any.whl → webscout-1.4.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AI.py +294 -185
- webscout/AIutel.py +2 -0
- webscout/__init__.py +3 -1
- webscout/async_providers.py +32 -32
- webscout/g4f.py +1 -1
- webscout/tempid.py +157 -0
- webscout/version.py +1 -1
- webscout/webai.py +31 -15
- {webscout-1.4.0.dist-info → webscout-1.4.2.dist-info}/METADATA +275 -58
- {webscout-1.4.0.dist-info → webscout-1.4.2.dist-info}/RECORD +14 -13
- {webscout-1.4.0.dist-info → webscout-1.4.2.dist-info}/LICENSE.md +0 -0
- {webscout-1.4.0.dist-info → webscout-1.4.2.dist-info}/WHEEL +0 -0
- {webscout-1.4.0.dist-info → webscout-1.4.2.dist-info}/entry_points.txt +0 -0
- {webscout-1.4.0.dist-info → webscout-1.4.2.dist-info}/top_level.txt +0 -0
webscout/AI.py
CHANGED
@@ -1,4 +1,5 @@
 import time
+import uuid
 from selenium import webdriver
 from selenium.webdriver.chrome.options import Options
 from selenium.webdriver.common.by import By
@@ -27,6 +28,206 @@ from webscout import exceptions
 from typing import Any, AsyncGenerator
 import logging
 import httpx
+#-----------------------------------------------xjai-------------------------------------------
+class Xjai(Provider):
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        temperature: float = 0.8,
+        top_p: float = 1,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ):
+        """
+        Initializes the Xjai class for interacting with the Xjai AI chat API.
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            temperature (float, optional): The creativity level of the AI's response. Defaults to 0.8.
+            top_p (float, optional): The probability threshold for token selection. Defaults to 1.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+        """
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.temperature = temperature
+        self.top_p = top_p
+        self.chat_endpoint = "https://p1api.xjai.pro/freeapi/chat-process"
+        self.stream_chunk_size = 1  # Process response line by line
+        self.timeout = timeout
+        self.last_response = {}
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Any:
+        """
+        Sends a chat request to the Xjai AI chat API and returns the response.
+
+        Args:
+            prompt (str): The query to send to the AI.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+
+        Returns:
+            Any: The response from the AI, either as a dictionary or a generator
+                depending on the `stream` and `raw` parameters.
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        headers = {
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
+                          "(KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36"
+        }
+
+        payload = {
+            "prompt": conversation_prompt + "\n\nReply in English Only",
+            "systemMessage": "Reply in English Only",
+            "temperature": self.temperature,
+            "top_p": self.top_p
+        }
+
+        def generate_response():
+            response = self.session.post(
+                self.chat_endpoint, headers=headers, json=payload, stream=True, timeout=self.timeout
+            )
+            output = ""
+            print_next = False
+
+            for line in response.iter_lines(decode_unicode=True, chunk_size=self.stream_chunk_size):
+                line_content = line.decode("utf-8")
+                # Filter out irrelevant content
+                if '[ChatAI](https://srv.aiflarepro.com/#/?cid=4111)' in line_content:
+                    continue
+                if '&KFw6loC9Qvy&' in line_content:
+                    parts = line_content.split('&KFw6loC9Qvy&')
+                    if print_next:
+                        output += parts[0]
+                        print_next = False
+                    else:
+                        output += parts[1]
+                        print_next = True
+                    if len(parts) > 2:
+                        print_next = False
+                elif print_next:
+                    output += line_content + '\n'
+
+            # Update chat history
+            self.conversation.update_chat_history(prompt, output)
+
+            return output
+
+        def for_stream():
+            response = generate_response()
+            for line in response.splitlines():
+                yield line if raw else dict(text=line)
+
+        def for_non_stream():
+            response = generate_response()
+            return response if raw else dict(text=response)
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Any:
+        """
+        Generates a response from the Xjai AI chat API.
+
+        Args:
+            prompt (str): The query to send to the AI.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+
+        Returns:
+            Any: The response from the AI, either as a string or a generator
+                depending on the `stream` parameter.
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: Any) -> str:
+        """
+        Retrieves the message from the AI's response.
+
+        Args:
+            response (Any): The response from the AI, either a dictionary
+                or a raw string.
+
+        Returns:
+            str: The extracted message from the AI's response.
+        """
+        if isinstance(response, dict):
+            return response["text"]
+        else:  # Assume raw string
+            return response
 #-----------------------------------------------llama 3-------------------------------------------
 class LLAMA2(Provider):
     def __init__(
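The new Xjai class follows the same Provider interface (ask/chat/get_message) as the other backends in AI.py. A minimal usage sketch, assuming webscout 1.4.2 is installed and the p1api.xjai.pro endpoint is reachable; the constructor argument shown is just one of the documented defaults:

```python
from webscout.AI import Xjai

ai = Xjai(timeout=30)

# Non-streaming: chat() returns the reply as a single string.
print(ai.chat("What is the capital of France?"))

# Streaming: chat(stream=True) yields the reply line by line.
for line in ai.chat("Explain HTTP in one paragraph", stream=True):
    print(line)
```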
@@ -4373,8 +4574,6 @@ class YEPCHAT(Provider):
             return response["choices"][0]["message"]["content"]
         except KeyError:
             return ""
-
-
 class AsyncYEPCHAT(AsyncProvider):
     def __init__(
         self,
@@ -4598,16 +4797,13 @@ class AsyncYEPCHAT(AsyncProvider):
             return response["choices"][0]["message"]["content"]
         except KeyError:
             return ""
-
+
+#-------------------------------------------------------youchat--------------------------------------------------------
+class YouChat(Provider):
     def __init__(
         self,
         is_conversation: bool = True,
         max_tokens: int = 600,
-        temperature: float = 0.6,
-        presence_penalty: int = 0,
-        frequency_penalty: int = 0,
-        top_p: float = 0.7,
-        model: str = "Mixtral-8x7B-Instruct-v0.1",
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -4616,43 +4812,34 @@ class AsyncYEPCHAT(AsyncProvider):
         history_offset: int = 10250,
         act: str = None,
     ):
-        """
-
-        Args:
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.6.
-            presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0.
-            frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0.
-            top_p (float, optional): Sampling threshold during inference time. Defaults to 0.7.
-            model (str, optional): LLM model name. Defaults to "gpt-3.5-turbo".
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-        """
+        self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
-        self.model = model
-        self.temperature = temperature
-        self.presence_penalty = presence_penalty
-        self.frequency_penalty = frequency_penalty
-        self.top_p = top_p
-        self.chat_endpoint = "https://api.yep.com/v1/chat/completions"
+        self.chat_endpoint = "https://you.com/api/streamingSearch"
         self.stream_chunk_size = 64
         self.timeout = timeout
         self.last_response = {}
+
+        self.payload = {
+            "q": "",
+            "page": 1,
+            "count": 10,
+            "safeSearch": "Off",
+            "onShoppingPage": False,
+            "mkt": "",
+            "responseFilter": "WebPages,Translations,TimeZone,Computation,RelatedSearches",
+            "domain": "youchat",
+            "queryTraceId": uuid.uuid4(),
+            "conversationTurnId": uuid.uuid4(),
+            "pastChatLength": 0,
+            "selectedChatMode": "default",
+            "chat": "[]",
+        }
+
         self.headers = {
-            "
-
-
-            "Content-Type": "application/json; charset=utf-8",
-            "Origin": "https://yep.com",
-            "Referer": "https://yep.com/",
-            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
+            "cache-control": "no-cache",
+            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
+            'Referer': f'https://you.com/search?q={self.payload["q"]}&fromSearchBar=true&tbm=youchat&chatMode=default'
         }
 
         self.__available_optimizers = (
@@ -4660,6 +4847,7 @@ class AsyncYEPCHAT(AsyncProvider):
             for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
+        self.session.headers.update(self.headers)
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -4671,12 +4859,9 @@ class AsyncYEPCHAT(AsyncProvider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-        self.session = httpx.AsyncClient(
-            headers=self.headers,
-            proxies=proxies,
-        )
+        self.session.proxies = proxies
 
-    async def ask(
+    def ask(
         self,
         prompt: str,
         stream: bool = False,
@@ -4684,35 +4869,6 @@ class AsyncYEPCHAT(AsyncProvider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> dict:
-        """Chat with AI asynchronously.
-
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict : {}
-        ```json
-        {
-            "id": "cmpl-c61c1c88de4e4ad3a79134775d17ea0c",
-            "object": "chat.completion.chunk",
-            "created": 1713876886,
-            "model": "Mixtral-8x7B-Instruct-v0.1",
-            "choices": [
-                {
-                    "index": 0,
-                    "delta": {
-                        "role": null,
-                        "content": " Sure, I can help with that. Are you looking for information on how to start coding, or do you need help with a specific coding problem? We can discuss various programming languages like Python, JavaScript, Java, C++, or others. Please provide more details so I can assist you better."
-                    },
-                    "finish_reason": null
-                }
-            ]
-        }
-        ```
-        """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
@@ -4723,58 +4879,60 @@ class AsyncYEPCHAT(AsyncProvider):
                 raise Exception(
                     f"Optimizer is not one of {self.__available_optimizers}"
                 )
-
-
-
-
-
-
-
-        }
+        self.session.headers.update(self.headers)
+        self.session.headers.update(
+            dict(
+                cookie=f"safesearch_guest=Off; uuid_guest={str(uuid4())}",
+            )
+        )
+        self.payload["q"] = prompt
 
-
-
-
-
-
-
-
-        )
+        def for_stream():
+            response = self.session.get(
+                self.chat_endpoint,
+                params=self.payload,
+                stream=True,
+                timeout=self.timeout,
+            )
 
-
-
-
-
-                        incomplete_message = await self.get_message(resp)
-                        if incomplete_message:
-                            message_load += incomplete_message
-                            resp["choices"][0]["delta"]["content"] = message_load
-                        self.last_response.update(resp)
-                        yield value if raw else resp
-                    elif raw:
-                        yield value
-                except json.decoder.JSONDecodeError:
-                    pass
+            if not response.ok:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason})"
+                )
 
+            streaming_response = ""
+            for line in response.iter_lines(decode_unicode=True, chunk_size=64):
+                if line:
+                    modified_value = re.sub("data:", "", line)
+                    try:
+                        json_modified_value = json.loads(modified_value)
+                        if "youChatToken" in json_modified_value:
+                            streaming_response += json_modified_value["youChatToken"]
+                            if print:
+                                print(json_modified_value["youChatToken"], end="")
+                    except:
+                        continue
+            self.last_response.update(dict(text=streaming_response))
             self.conversation.update_chat_history(
-                prompt,
+                prompt, self.get_message(self.last_response)
             )
+            return streaming_response
 
-        async def for_non_stream():
-            async for _ in for_stream():
+        def for_non_stream():
+            for _ in for_stream():
                 pass
             return self.last_response
 
-        return for_stream() if stream else await for_non_stream()
+        return for_stream() if stream else for_non_stream()
 
-    async def chat(
+    def chat(
         self,
         prompt: str,
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
     ) -> str:
-        """Generate response `str`
+        """Generate response `str`
         Args:
             prompt (str): Prompt to be send.
             stream (bool, optional): Flag for streaming response. Defaults to False.
@@ -4784,17 +4942,32 @@ class AsyncYEPCHAT(AsyncProvider):
             str: Response generated
         """
 
-
-
-
-
+        def chat(
+            self,
+            prompt: str,
+            stream: bool = False,
+            optimizer: str = None,
+            conversationally: bool = False,
+        ) -> str:
+            """Generate response `str`
+            Args:
+                prompt (str): Prompt to be send.
+                stream (bool, optional): Flag for streaming response. Defaults to False.
+                optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+                conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+            Returns:
+                str: Response generated
+            """
 
-
-
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
 
-
-            return
-
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
                     prompt,
                     False,
                     optimizer=optimizer,
@@ -4802,9 +4975,9 @@ class AsyncYEPCHAT(AsyncProvider):
                 )
             )
 
-        return for_stream() if stream else await for_non_stream()
+        return for_stream() if stream else for_non_stream()
 
-    async def get_message(self, response: dict) -> str:
+    def get_message(self, response: dict) -> str:
         """Retrieves message only from response
 
         Args:
@@ -4814,71 +4987,7 @@ class AsyncYEPCHAT(AsyncProvider):
             str: Message extracted
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        try:
-            if response["choices"][0].get("delta"):
-                return response["choices"][0]["delta"]["content"]
-            return response["choices"][0]["message"]["content"]
-        except KeyError:
-            return ""
-#-------------------------------------------------------youchat--------------------------------------------------------
-class youChat:
-    """
-    This class provides methods for generating completions based on prompts.
-    """
-    def create(self, prompt):
-        """
-        Generate a completion based on the provided prompt.
-
-        Args:
-            prompt (str): The input prompt to generate a completion from.
-
-        Returns:
-            str: The generated completion as a text string.
-
-        Raises:
-            Exception: If the response does not contain the expected "youChatToken".
-        """
-        resp = get(
-            "https://you.com/api/streamingSearch",
-            headers={
-                "cache-control": "no-cache",
-                "referer": "https://you.com/search?q=gpt4&tbm=youchat",
-                "cookie": f"safesearch_guest=Off; uuid_guest={str(uuid4())}",
-            },
-            params={
-                "q": prompt,
-                "page": 1,
-                "count": 10,
-                "safeSearch": "Off",
-                "onShoppingPage": False,
-                "mkt": "",
-                "responseFilter": "WebPages,Translations,TimeZone,Computation,RelatedSearches",
-                "domain": "youchat",
-                "queryTraceId": str(uuid4()),
-                "chat": [],
-            },
-            impersonate="chrome107",
-        )
-        if "youChatToken" not in resp.text:
-            raise RequestsError("Unable to fetch the response.")
-        return (
-            "".join(
-                findall(
-                    r"{\"youChatToken\": \"(.*?)\"}",
-                    resp.content.decode("unicode-escape"),
-                )
-            )
-            .replace("\\n", "\n")
-            .replace("\\\\", "\\")
-            .replace('\\"', '"')
-        )
-
-    @staticmethod
-    def chat_cli(prompt):
-        """Generate completion based on the provided prompt"""
-        you_chat = youChat()
-        completion = you_chat.create(prompt)
-        print(completion)
+        return response["text"]
 #-------------------------------------------------------Gemini--------------------------------------------------------
 from Bard import Chatbot
 import logging
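The standalone youChat helper removed above is superseded by the YouChat Provider class added earlier in this file. A minimal sketch of the equivalent call, assuming network access to you.com (note that ask() also echoes tokens to stdout as they arrive, via the `if print:` branch shown above):

```python
from webscout.AI import YouChat

ai = YouChat(timeout=30)

# chat() drives ask() and extracts the accumulated "text" field.
print(ai.chat("What does the webscout package do?"))
```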
webscout/AIutel.py
CHANGED
webscout/__init__.py
CHANGED
@@ -1,6 +1,6 @@
 """Webscout.
 
-Search for anything using the Google, DuckDuckGo
+Search for anything using the Google, DuckDuckGo, phind.com. Also containes AI models, can transcribe yt videos, temporary email and phone number generation, have TTS support and webai(terminal gpt and open interpeter)
 """
 import g4f
 import logging
@@ -28,6 +28,8 @@ webai = [
     "reka",
     "cohere",
     "yepchat",
+    "you",
+    "xjai"
 ]
 
 gpt4free_providers = [
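Since webai is a plain module-level list, the two new entries can be checked directly. A quick sketch, assuming webscout 1.4.2 imports cleanly:

```python
import webscout

# "you" and "xjai" were appended to the webai provider list in 1.4.2.
assert "you" in webscout.webai and "xjai" in webscout.webai
```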
webscout/async_providers.py
CHANGED
@@ -1,33 +1,33 @@
-from webscout.AI import AsyncPhindSearch
-from webscout.AI import AsyncYEPCHAT
-from webscout.AI import AsyncOPENGPT
-from webscout.AI import AsyncOPENAI
-from webscout.AI import AsyncLLAMA2
-from webscout.AI import AsyncLEO
-from webscout.AI import AsyncKOBOLDAI
-from webscout.AI import AsyncGROQ
-from webscout.AI import AsyncBLACKBOXAI
-from webscout.AI import AsyncGPT4FREE
-
-mapper: dict[str, object] = {
-    "phind": AsyncPhindSearch,
-    "opengpt": AsyncOPENGPT,
-    "koboldai": AsyncKOBOLDAI,
-    "blackboxai": AsyncBLACKBOXAI,
-    "gpt4free": AsyncGPT4FREE,
-    "llama2": AsyncLLAMA2,
-    "yepchat": AsyncYEPCHAT,
-    "leo": AsyncLEO,
-    "groq": AsyncGROQ,
-    "openai": AsyncOPENAI,
-}
-
-tgpt_mapper: dict[str, object] = {
-    "phind": AsyncPhindSearch,
-    "opengpt": AsyncOPENGPT,
-    "koboldai": AsyncKOBOLDAI,
-    # "gpt4free": AsyncGPT4FREE,
-    "blackboxai": AsyncBLACKBOXAI,
-    "llama2": AsyncLLAMA2,
-    "yepchat": AsyncYEPCHAT,
+from webscout.AI import AsyncPhindSearch
+from webscout.AI import AsyncYEPCHAT
+from webscout.AI import AsyncOPENGPT
+from webscout.AI import AsyncOPENAI
+from webscout.AI import AsyncLLAMA2
+from webscout.AI import AsyncLEO
+from webscout.AI import AsyncKOBOLDAI
+from webscout.AI import AsyncGROQ
+from webscout.AI import AsyncBLACKBOXAI
+from webscout.AI import AsyncGPT4FREE
+
+mapper: dict[str, object] = {
+    "phind": AsyncPhindSearch,
+    "opengpt": AsyncOPENGPT,
+    "koboldai": AsyncKOBOLDAI,
+    "blackboxai": AsyncBLACKBOXAI,
+    "gpt4free": AsyncGPT4FREE,
+    "llama2": AsyncLLAMA2,
+    "yepchat": AsyncYEPCHAT,
+    "leo": AsyncLEO,
+    "groq": AsyncGROQ,
+    "openai": AsyncOPENAI,
+}
+
+tgpt_mapper: dict[str, object] = {
+    "phind": AsyncPhindSearch,
+    "opengpt": AsyncOPENGPT,
+    "koboldai": AsyncKOBOLDAI,
+    # "gpt4free": AsyncGPT4FREE,
+    "blackboxai": AsyncBLACKBOXAI,
+    "llama2": AsyncLLAMA2,
+    "yepchat": AsyncYEPCHAT,
 }
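Apart from the line-by-line rewrite above (formatting only; every removed line is re-added with identical content), async_providers.py still maps provider names to their async classes. A minimal lookup sketch, assuming the AsyncProvider classes expose an awaitable chat(prompt) as AsyncYEPCHAT does in AI.py and that their constructors work with default arguments:

```python
import asyncio
from webscout.async_providers import mapper

async def main():
    # Resolve the async provider class by name, then instantiate it.
    provider = mapper["phind"]()         # AsyncPhindSearch
    print(await provider.chat("Hello"))  # awaitable chat(prompt) assumed

asyncio.run(main())
```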