webscout-4.7-py3-none-any.whl → webscout-4.9-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/Agents/functioncall.py +97 -37
- webscout/Bard.py +365 -0
- webscout/Bing_search.py +124 -0
- webscout/DWEBS.py +141 -777
- webscout/Local/_version.py +1 -1
- webscout/Provider/Andi.py +7 -1
- webscout/Provider/BasedGPT.py +11 -5
- webscout/Provider/Berlin4h.py +11 -5
- webscout/Provider/Blackboxai.py +10 -4
- webscout/Provider/Cloudflare.py +286 -0
- webscout/Provider/Cohere.py +11 -5
- webscout/Provider/DARKAI.py +25 -7
- webscout/Provider/Deepinfra.py +2 -1
- webscout/Provider/Deepseek.py +25 -9
- webscout/Provider/DiscordRocks.py +389 -0
- webscout/Provider/Farfalle.py +227 -0
- webscout/Provider/Gemini.py +1 -1
- webscout/Provider/Groq.py +244 -110
- webscout/Provider/Llama.py +13 -5
- webscout/Provider/Llama3.py +15 -2
- webscout/Provider/OLLAMA.py +8 -7
- webscout/Provider/{Geminiflash.py → PI.py} +96 -40
- webscout/Provider/Perplexity.py +422 -52
- webscout/Provider/Phind.py +6 -5
- webscout/Provider/PizzaGPT.py +7 -1
- webscout/Provider/Youchat.py +98 -76
- webscout/Provider/__init__.py +26 -31
- webscout/Provider/ai4chat.py +193 -0
- webscout/Provider/{VTLchat.py → felo_search.py} +62 -76
- webscout/Provider/julius.py +263 -0
- webscout/Provider/koala.py +11 -5
- webscout/Provider/liaobots.py +268 -0
- webscout/Provider/meta.py +2 -1
- webscout/Provider/{ChatGPTUK.py → turboseek.py} +79 -56
- webscout/Provider/{FreeGemini.py → xdash.py} +51 -18
- webscout/Provider/yep.py +258 -0
- webscout/__init__.py +1 -59
- webscout/version.py +1 -1
- webscout/webai.py +2 -64
- webscout/webscout_search.py +1 -1
- {webscout-4.7.dist-info → webscout-4.9.dist-info}/METADATA +249 -323
- webscout-4.9.dist-info/RECORD +83 -0
- webscout/GoogleS.py +0 -342
- webscout/Provider/Geminipro.py +0 -152
- webscout/Provider/Leo.py +0 -469
- webscout/Provider/OpenGPT.py +0 -867
- webscout/Provider/Xjai.py +0 -230
- webscout/Provider/Yepchat.py +0 -478
- webscout-4.7.dist-info/RECORD +0 -80
- {webscout-4.7.dist-info → webscout-4.9.dist-info}/LICENSE.md +0 -0
- {webscout-4.7.dist-info → webscout-4.9.dist-info}/WHEEL +0 -0
- {webscout-4.7.dist-info → webscout-4.9.dist-info}/entry_points.txt +0 -0
- {webscout-4.7.dist-info → webscout-4.9.dist-info}/top_level.txt +0 -0
webscout/Provider/liaobots.py ADDED

@@ -0,0 +1,268 @@
+import json
+import re
+import uuid
+import gzip
+import zlib
+from typing import Any, Dict, Generator, Union
+
+import requests
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+
+class LiaoBots(Provider):
+    """
+    A class to interact with the LiaoBots API.
+    """
+
+    # List of available models
+    AVAILABLE_MODELS = [
+        "gpt-4o-mini",
+        "gpt-4o-free",
+        "gpt-4o-mini-free",
+        "gpt-4-turbo-2024-04-09",
+        "gpt-4o",
+        "gpt-4-0613",
+        "claude-3-5-sonnet-20240620",
+        "gemini-1.5-pro-latest",
+        "gemini-1.5-flash-latest"
+    ]
+
+    def __init__(
+        self,
+        auth_code: str = "G3USRn7M5zsXn",
+        cookie: str = "gkp2=pevIjZCYj8wMcrWPEAq6",
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "claude-3-5-sonnet-20240620",
+        system_prompt: str = "You are a helpful assistant."
+    ) -> None:
+        """
+        Initializes the LiaoBots API with given parameters.
+
+        Args:
+            auth_code (str): The auth code for authentication.
+            cookie (str): The cookie for authentication.
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            model (str, optional): AI model to use for text generation. Defaults to "claude-3-5-sonnet-20240620".
+            system_prompt (str, optional): System prompt for LiaoBots. Defaults to "You are a helpful assistant.".
+        """
+
+        # Check if the chosen model is available
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.auth_code = auth_code
+        self.cookie = cookie
+        self.api_endpoint = "https://liaobots.work/api/chat"
+        self.model = model
+        self.system_prompt = system_prompt
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.headers = {
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "content-type": "application/json",
+            "cookie": self.cookie,
+            "dnt": "1",
+            "origin": "https://liaobots.work",
+            "priority": "u=1, i",
+            "referer": "https://liaobots.work/en",
+            "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
+            "x-Auth-Code": self.auth_code,
+        }
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any]:
+        """
+        Sends a prompt to the LiaoBots API and returns the response.
+
+        Args:
+            prompt: The text prompt to generate text from.
+            stream (bool, optional): Whether to stream the response. Defaults to False.
+            raw (bool, optional): Whether to return the raw response. Defaults to False.
+            optimizer (str, optional): The name of the optimizer to use. Defaults to None.
+            conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
+
+        Returns:
+            The response from the API.
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload: Dict[str, any] = {
+            "conversationId": str(uuid.uuid4()),
+            "model": {
+                "id": self.model
+            },
+            "messages": [
+                {
+                    "role": "user",
+                    "content": conversation_prompt
+                }
+            ],
+            "key": "",
+            "prompt": self.system_prompt
+        }
+
+        def for_stream():
+            response = self.session.post(
+                self.api_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
+            )
+
+            if not response.ok:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason})"
+                )
+
+            streaming_response = ""
+            content_encoding = response.headers.get('Content-Encoding')
+            # Stream the response
+            for chunk in response.iter_content():
+                if chunk:
+                    try:
+                        # Decompress the chunk if necessary
+                        if content_encoding == 'gzip':
+                            chunk = gzip.decompress(chunk)
+                        elif content_encoding == 'deflate':
+                            chunk = zlib.decompress(chunk)
+
+                        # Decode the chunk
+                        decoded_chunk = chunk.decode('utf-8')
+                        streaming_response += decoded_chunk
+                    except UnicodeDecodeError:
+                        # Handle non-textual data
+                        pass
+                else:
+                    pass
+            self.last_response.update(dict(text=streaming_response))
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+            if stream:
+                yield from []  # Yield nothing when streaming, focus on side effects
+            else:
+                return []  # Return empty list for non-streaming case
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+if __name__ == '__main__':
+    from rich import print
+    liaobots = LiaoBots()
+    response = liaobots.chat("tell me about india")
+    for chunk in response:
+        print(chunk, end="", flush=True)
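For orientation, a minimal usage sketch of the LiaoBots provider added above. This is not part of the release; it assumes the hard-coded default auth_code and cookie shipped in liaobots.py are still accepted by liaobots.work, which can stop being true at any time:

# Hypothetical quick check of the new provider (not part of the diff itself).
# Assumes webscout 4.9 is installed and the bundled auth defaults still work.
from webscout.Provider.liaobots import LiaoBots

bot = LiaoBots(model="gpt-4o-mini")  # must be one of LiaoBots.AVAILABLE_MODELS
print(bot.chat("Hello"))             # non-streaming: returns the full reply as a str

# Caveat visible in the code above: with stream=True, for_stream() buffers the
# whole reply and then does `yield from []`, so chat(stream=True) yields nothing
# in this version; the non-streaming path is the usable one.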
webscout/Provider/{ChatGPTUK.py → turboseek.py} RENAMED

@@ -1,29 +1,41 @@
+import time
+import uuid
+from selenium import webdriver
+from selenium.webdriver.chrome.options import Options
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.ui import WebDriverWait
+import click
 import requests
-from …
-import …
+from requests import get
+from uuid import uuid4
+from re import findall
+from requests.exceptions import RequestException
+from curl_cffi.requests import get, RequestsError
+import g4f
+from random import randint
+from PIL import Image
+import io
 import re
-
-
-from …
-from …
-from …
+import json
+import yaml
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
+from typing import Any, AsyncGenerator, Dict
+import logging
+import httpx
 
-
-class ChatGPTUK(Provider):
+class TurboSeek(Provider):
     """
-
+    This class provides methods for interacting with the TurboSeek API.
     """
-
     def __init__(
         self,
         is_conversation: bool = True,
         max_tokens: int = 600,
-        temperature: float = 0.9,
-        presence_penalty: float = 0,
-        frequency_penalty: float = 0,
-        top_p: float = 1,
-        model: str = "google-gemini-pro",
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,

@@ -31,18 +43,12 @@ class ChatGPTUK(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-    )…
-        """
-        Initializes the ChatGPTUK API with given parameters.
+    ):
+        """Instantiates TurboSeek
 
         Args:
            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-           temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.9.
-           presence_penalty (float, optional): Chances of topic being repeated. Defaults to 0.
-           frequency_penalty (float, optional): Chances of word being repeated. Defaults to 0.
-           top_p (float, optional): Sampling threshold during inference time. Defaults to 1.
-           model (str, optional): LLM model name. Defaults to "google-gemini-pro".
            timeout (int, optional): Http request timeout. Defaults to 30.
            intro (str, optional): Conversation introductory prompt. Defaults to None.
            filepath (str, optional): Path to file containing conversation history. Defaults to None.

@@ -54,16 +60,32 @@ class ChatGPTUK(Provider):
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
-        self.…
+        self.chat_endpoint = "https://www.turboseek.io/api/getAnswer"
         self.stream_chunk_size = 64
         self.timeout = timeout
         self.last_response = {}
-        self.…
-
-
-
-
-
+        self.headers = {
+            "authority": "www.turboseek.io",
+            "method": "POST",
+            "path": "/api/getAnswer",
+            "scheme": "https",
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "content-length": "63",
+            "content-type": "application/json",
+            "dnt": "1",
+            "origin": "https://www.turboseek.io",
+            "priority": "u=1, i",
+            "referer": "https://www.turboseek.io/?ref=taaft&utm_source=taaft&utm_medium=referral",
+            "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0"
+        }
 
         self.__available_optimizers = (
             method

@@ -121,40 +143,35 @@ class ChatGPTUK(Provider):
 
         self.session.headers.update(self.headers)
         payload = {
-            "…
-
-                {"role": "user", "content": conversation_prompt}
-            ],
-            "stream": True,
-            "model": self.model,
-            "temperature": self.temperature,
-            "presence_penalty": self.presence_penalty,
-            "frequency_penalty": self.frequency_penalty,
-            "top_p": self.top_p,
-            "max_tokens": self.max_tokens_to_sample
+            "question": conversation_prompt,
+            "sources": []
         }
 
         def for_stream():
             response = self.session.post(
-                self.…
+                self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
             )
             if not response.ok:
                 raise exceptions.FailedToGenerateResponseError(
                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                 )
 
-
-            for …
-
-
-
-
-
-
-
-
-
+            streaming_text = ""
+            for value in response.iter_lines(
+                decode_unicode=True,
+                chunk_size=self.stream_chunk_size,
+                delimiter="\n",
+            ):
+                try:
+                    if bool(value) and value.startswith("data: "):
+                        data = json.loads(value[6:])
+                        if "text" in data:
+                            streaming_text += data["text"]
+                            resp = dict(text=streaming_text)
+                            self.last_response.update(resp)
+                            yield value if raw else resp
+                except json.decoder.JSONDecodeError:
+                    pass
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
             )

@@ -211,4 +228,10 @@ class ChatGPTUK(Provider):
             str: Message extracted
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
+        return response["text"]
+if __name__ == '__main__':
+    from rich import print
+    ai = TurboSeek()
+    response = ai.chat(input(">>> "))
+    for chunk in response:
+        print(chunk, end="", flush=True)
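The heart of the new TurboSeek provider is the loop above that parses server-sent lines of the form `data: {...}` and accumulates each JSON object's `text` field. A standalone sketch of that protocol, using only the endpoint and payload shape visible in the diff (whether the service still answers minimal unauthenticated requests like this is an assumption):

import json
import requests

# Endpoint and payload shape taken from the turboseek.py diff above;
# continued availability of the service is an assumption.
resp = requests.post(
    "https://www.turboseek.io/api/getAnswer",
    json={"question": "What is HTTP?", "sources": []},
    stream=True,
    timeout=30,
)
answer = ""
for line in resp.iter_lines(decode_unicode=True):
    if line and line.startswith("data: "):
        try:
            payload = json.loads(line[6:])  # strip the "data: " prefix
        except json.JSONDecodeError:
            continue  # ignore keep-alive or partial lines
        answer += payload.get("text", "")  # accumulate streamed tokens
print(answer)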
webscout/Provider/{FreeGemini.py → xdash.py} RENAMED

@@ -19,21 +19,21 @@ import io
 import re
 import json
 import yaml
-from …
-from …
-from …
-from …
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
 from typing import Any, AsyncGenerator, Dict
 import logging
 import httpx
 
-class …
+class XDASH(Provider):
     def __init__(
         self,
         is_conversation: bool = True,
         max_tokens: int = 600,
-        timeout: int = …
+        timeout: int = 30,
         intro: str = None,
         filepath: str = None,
         update_file: bool = True,

@@ -41,7 +41,7 @@ class FreeGemini(Provider):
         history_offset: int = 10250,
         act: str = None,
     ):
-        """Instantiates …
+        """Instantiates XDASH
 
        Args:
            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.

@@ -57,13 +57,27 @@ class FreeGemini(Provider):
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
-        self.chat_endpoint = "https://…
+        self.chat_endpoint = "https://www.xdash.ai/api/query"
+        self.stream_chunk_size = 64
         self.timeout = timeout
         self.last_response = {}
-
         self.headers = {
-            "accept": "…
-
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "content-type": "application/json",
+            "cookie": "cf_clearance=73aup_8JU0LU.tRr7D4qd4Kt7gapKFi3RVW8jLzQoP0-1723549451-1.0.1.1-HTRrjMvM5GRLsfCTB0v3N_UxQzQMfA1fvOSf0dsZJ73HR6.IUTH8BH.G1dpx3s_IxVHCBCHMXOCt0K7vyIwMgw",
+            "dnt": "1",
+            "origin": "https://www.xdash.ai",
+            "priority": "u=1, i",
+            "referer": "https://www.xdash.ai/search",
+            "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0"
         }
 
         self.__available_optimizers = (

@@ -101,6 +115,13 @@ class FreeGemini(Provider):
            raw (bool, optional): Stream back raw response as received. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+           dict : {}
+        ```json
+        {
+           "text" : "How may I assist you today?"
+        }
+        ```
         """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:

@@ -114,23 +135,29 @@ class FreeGemini(Provider):
             )
 
         self.session.headers.update(self.headers)
-        payload = {…
+        payload = {
+            "query": conversation_prompt,
+            "search_uuid": uuid.uuid4().hex,
+            "visitor_uuid": uuid.uuid4().hex,
+            "token": uuid.uuid4().hex
+        }
 
         response = self.session.post(
             self.chat_endpoint, json=payload, timeout=self.timeout
         )
-
         if not response.ok:
             raise exceptions.FailedToGenerateResponseError(
                 f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
             )
 
-
-
+        # Extract the LLM response
+        llm_response = response.text.split("__LLM_RESPONSE__")[1].split("__RELATED_QUESTIONS__")[0].strip()
+        resp = dict(text=llm_response)
+        self.last_response.update(resp)
         self.conversation.update_chat_history(
-            prompt, …
+            prompt, self.get_message(self.last_response)
         )
-        return …
+        return self.last_response

@@ -148,6 +175,7 @@ class FreeGemini(Provider):
         Returns:
             str: Response generated
         """
+
         return self.get_message(
             self.ask(
                 prompt,

@@ -166,4 +194,9 @@ class FreeGemini(Provider):
             str: Message extracted
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["…
+        return response["text"]
+if __name__ == '__main__':
+    from rich import print
+    ai = XDASH()
+    response = ai.chat(input(">>> "))
+    print(response)
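xdash.py recovers the answer by splitting the raw response body on the `__LLM_RESPONSE__` and `__RELATED_QUESTIONS__` markers, as seen in the hunk above. A minimal illustration of that extraction (the marker names come from the diff; the sample body is invented):

# Marker-based extraction as done in xdash.py; `body` is a made-up example.
body = "meta__LLM_RESPONSE__ Paris is the capital of France. __RELATED_QUESTIONS__[...]"
answer = body.split("__LLM_RESPONSE__")[1].split("__RELATED_QUESTIONS__")[0].strip()
print(answer)  # -> Paris is the capital of France.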