webscout 5.2-py3-none-any.whl → 5.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIutel.py +10 -10
- webscout/DWEBS.py +99 -77
- webscout/Provider/Chatify.py +174 -0
- webscout/Provider/NetFly.py +21 -61
- webscout/Provider/RUBIKSAI.py +11 -5
- webscout/Provider/TTS/streamElements.py +4 -8
- webscout/Provider/TTS/voicepod.py +11 -7
- webscout/Provider/__init__.py +6 -4
- webscout/Provider/ai4chat.py +14 -8
- webscout/Provider/cerebras.py +199 -0
- webscout/Provider/felo_search.py +28 -68
- webscout/Provider/x0gpt.py +181 -0
- webscout/__init__.py +2 -2
- webscout/exceptions.py +2 -1
- webscout/transcriber.py +195 -140
- {webscout-5.2.dist-info → webscout-5.3.dist-info}/METADATA +12 -73
- {webscout-5.2.dist-info → webscout-5.3.dist-info}/RECORD +21 -21
- webscout/Provider/Berlin4h.py +0 -217
- webscout/Provider/liaobots.py +0 -268
- webscout/voice.py +0 -34
- {webscout-5.2.dist-info → webscout-5.3.dist-info}/LICENSE.md +0 -0
- {webscout-5.2.dist-info → webscout-5.3.dist-info}/WHEEL +0 -0
- {webscout-5.2.dist-info → webscout-5.3.dist-info}/entry_points.txt +0 -0
- {webscout-5.2.dist-info → webscout-5.3.dist-info}/top_level.txt +0 -0
webscout/AIutel.py
CHANGED

@@ -594,16 +594,16 @@ w.print_weather(weather)
             interpreter (str, optional): Python's interpreter name. Defaults to Python.
             prettify (bool, optional): Prettify the code on stdout. Defaults to True.
         """
-        if not quiet:
-            print(
-                "Rawdog is an experimental tool that generates and auto-executes Python scripts in the cli.\n"
-                "To get the most out of Rawdog. Ensure the following are installed:\n"
-                " 1. Python 3.x\n"
-                " 2. Dependency:\n"
-                " - Matplotlib\n"
-                "Be alerted on the risk posed! (Experimental)\n"
-                "Use '--quiet' to suppress this message and code/logs stdout.\n"
-            )
+        # if not quiet:
+        #     print(
+        #         "Rawdog is an experimental tool that generates and auto-executes Python scripts in the cli.\n"
+        #         "To get the most out of Rawdog. Ensure the following are installed:\n"
+        #         " 1. Python 3.x\n"
+        #         " 2. Dependency:\n"
+        #         " - Matplotlib\n"
+        #         "Be alerted on the risk posed! (Experimental)\n"
+        #         "Use '--quiet' to suppress this message and code/logs stdout.\n"
+        #     )
         self.internal_exec = internal_exec
         self.confirm_script = confirm_script
         self.quiet = quiet
webscout/DWEBS.py
CHANGED

@@ -8,18 +8,19 @@ import time
 import random
 
 class GoogleS:
-    """
-
-
+    """
+    Class to perform Google searches and retrieve results.
+    """
 
     def __init__(
         self,
         headers: Optional[Dict[str, str]] = None,
         proxy: Optional[str] = None,
         timeout: Optional[int] = 10,
-
-
-
+        max_workers: int = 20  # Increased max workers for thread pool
+    ):
+        """Initializes the GoogleS object."""
+        self.proxy = proxy
         self.headers = headers if headers else {
             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.62"
         }

@@ -28,20 +29,19 @@ class GoogleS:
         self.client.headers.update(self.headers)
         self.client.proxies.update({"http": self.proxy, "https": self.proxy})
         self.timeout = timeout
+        self._executor = ThreadPoolExecutor(max_workers=max_workers)
 
-    def __enter__(self)
+    def __enter__(self):
         return self
 
     def __exit__(self, exc_type, exc_val, exc_tb):
         self.client.close()
 
-    def _get_url(
-
-
-
-
-        data: Optional[Union[Dict[str, str], bytes]] = None,
-    ) -> bytes:
+    def _get_url(self, method: str, url: str, params: Optional[Dict[str, str]] = None,
+                 data: Optional[Union[Dict[str, str], bytes]] = None) -> bytes:
+        """
+        Makes an HTTP request and returns the response content.
+        """
         try:
             resp = self.client.request(method, url, params=params, data=data, timeout=self.timeout)
         except Exception as ex:

@@ -50,13 +50,13 @@ class GoogleS:
             return resp.content
         raise Exception(f"{resp.url} returned status code {resp.status_code}. {params=} {data=}")
 
-    def
-    """
-
-
+    def _extract_text_from_webpage(self, html_content: bytes, max_characters: Optional[int] = None) -> str:
+        """
+        Extracts visible text from HTML content using lxml parser.
+        """
+        soup = BeautifulSoup(html_content, 'lxml')  # Use lxml parser
         for tag in soup(["script", "style", "header", "footer", "nav"]):
             tag.extract()
-        # Get the remaining visible text
         visible_text = soup.get_text(strip=True)
         if max_characters:
             visible_text = visible_text[:max_characters]

@@ -64,32 +64,56 @@ class GoogleS:
 
     def search(
         self,
-
+        query: str,
         region: str = "us-en",
-
+        language: str = "en",
         safe: str = "off",
-
-        max_results:
-
-
-    ) -> List[Dict[str, str]]:
-        """
-
+        time_period: Optional[str] = None,
+        max_results: int = 10,
+        extract_text: bool = False,
+        max_text_length: Optional[int] = 100,
+    ) -> List[Dict[str, Union[str, int]]]:
+        """
+        Performs a Google search and returns the results.
+
+        Args:
+            query (str): The search query.
+            region (str, optional): The region to search in (e.g., "us-en"). Defaults to "us-en".
+            language (str, optional): The language of the search results (e.g., "en"). Defaults to "en".
+            safe (str, optional): Safe search setting ("off", "active"). Defaults to "off".
+            time_period (Optional[str], optional): Time period filter (e.g., "h" for past hour, "d" for past day).
+                Defaults to None.
+            max_results (int, optional): The maximum number of results to retrieve. Defaults to 10.
+            extract_text (bool, optional): Whether to extract text from the linked web pages. Defaults to False.
+            max_text_length (Optional[int], optional): The maximum length of the extracted text (in characters).
+                Defaults to 100.
+
+        Returns:
+            List[Dict[str, Union[str, int]]]: A list of dictionaries, each representing a search result, containing:
+                - 'title': The title of the result.
+                - 'href': The URL of the result.
+                - 'abstract': The description snippet of the result.
+                - 'index': The index of the result in the list.
+                - 'type': The type of result (currently always "web").
+                - 'visible_text': The extracted text from the web page (if `extract_text` is True).
+        """
+        assert query, "Query cannot be empty."
 
         results = []
         futures = []
         start = 0
+
         while len(results) < max_results:
             params = {
-                "q":
-                "num": 10,
-                "hl":
+                "q": query,
+                "num": 10,
+                "hl": language,
                 "start": start,
                 "safe": safe,
                 "gl": region,
             }
-            if
-                params["tbs"] = f"qdr:{
+            if time_period:
+                params["tbs"] = f"qdr:{time_period}"
 
             futures.append(self._executor.submit(self._get_url, "GET", "https://www.google.com/search", params=params))
             start += 10

@@ -97,54 +121,52 @@ class GoogleS:
         for future in as_completed(futures):
             try:
                 resp_content = future.result()
-                soup = BeautifulSoup(resp_content,
-
+                soup = BeautifulSoup(resp_content, 'lxml')  # Use lxml parser
+                result_blocks = soup.find_all("div", class_="g")
 
-                if not
+                if not result_blocks:
                     break
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                except Exception as e:
-                    print(f"Error extracting result: {e}")
+                # Extract links and titles first
+                for result_block in result_blocks:
+                    link = result_block.find("a", href=True)
+                    title = result_block.find("h3")
+                    description_box = result_block.find(
+                        "div", {"style": "-webkit-line-clamp:2"}
+                    )
+
+                    if link and title and description_box:
+                        url = link["href"]
+                        results.append({
+                            "title": title.text,
+                            "href": url,
+                            "abstract": description_box.text,
+                            "index": len(results),
+                            "type": "web",
+                            "visible_text": ""  # Initialize visible_text as empty string
+                        })
+
+                    if len(results) >= max_results:
+                        break  # Stop if we have enough results
+
+                # Parallelize text extraction if needed
+                if extract_text:
+                    with ThreadPoolExecutor(max_workers=self._executor._max_workers) as text_extractor:
+                        extraction_futures = [
+                            text_extractor.submit(self._extract_text_from_webpage,
+                                                  self._get_url("GET", result['href']),
+                                                  max_characters=max_text_length)
+                            for result in results
+                            if 'href' in result
+                        ]
+                        for i, future in enumerate(as_completed(extraction_futures)):
+                            try:
+                                results[i]['visible_text'] = future.result()
+                            except Exception as e:
+                                print(f"Error extracting text: {e}")
 
             except Exception as e:
-                print(f"Error
+                print(f"Error: {e}")
 
         return results

@@ -152,6 +174,6 @@ class GoogleS:
 if __name__ == "__main__":
     from rich import print
     searcher = GoogleS()
-    results = searcher.search("HelpingAI-9B", max_results=20,
+    results = searcher.search("HelpingAI-9B", max_results=20, extract_text=False, max_text_length=200)
     for result in results:
         print(result)
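For reference, a minimal usage sketch of the reworked GoogleS.search() above. The parameter names and result keys come straight from the diff; the import path webscout.DWEBS is assumed from the file location, and the "d" value follows the qdr: filter logic shown in the hunk.

# Usage sketch (assumed import path; not part of the release diff)
from webscout.DWEBS import GoogleS

searcher = GoogleS(timeout=10, max_workers=20)
results = searcher.search(
    "HelpingAI-9B",
    max_results=5,
    time_period="d",      # becomes params["tbs"] = "qdr:d" (past day)
    extract_text=True,    # fetch each hit and fill 'visible_text'
    max_text_length=200,
)
for result in results:
    print(result["title"], result["href"])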
webscout/Provider/Chatify.py
ADDED

@@ -0,0 +1,174 @@
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+import requests
+
+class Chatify(Provider):
+    """
+    A class to interact with the Chatify AI API.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        system_prompt: str = "You are a helpful and informative AI assistant.",
+    ):
+        """
+        Initializes the Chatify AI API with given parameters.
+        """
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://chatify-ai.vercel.app/api/chat"
+        self.timeout = timeout
+        self.last_response = {}
+        self.headers = {
+            'Accept': '*/*',
+            'Accept-Encoding': 'gzip, deflate, br, zstd',
+            'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
+            'Content-Type': 'application/json',
+            'DNT': '1',
+            'Origin': 'https://chatify-ai.vercel.app',
+            'Referer': 'https://chatify-ai.vercel.app/',
+            'Sec-CH-UA': '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            'Sec-CH-UA-Mobile': '?0',
+            'Sec-CH-UA-Platform': '"Windows"',
+            'Sec-Fetch-Dest': 'empty',
+            'Sec-Fetch-Mode': 'cors',
+            'Sec-Fetch-Site': 'same-origin',
+            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0',
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+        self.system_prompt = system_prompt
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """
+        Sends a prompt to the Chatify API and returns the response.
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        messages = []
+        if self.system_prompt:
+            messages.append({"role": "system", "content": self.system_prompt})
+        messages.append({"role": "user", "content": conversation_prompt})
+
+        payload = {
+            "messages": messages
+        }
+
+        def for_stream():
+            response = self.session.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout)
+            if not response.ok:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            streaming_text = ""
+            for line in response.iter_lines():
+                if line:
+                    decoded_line = line.decode('utf-8')  # Decode the line
+                    parts = decoded_line.split(':', 1)
+                    if len(parts) > 1:
+                        content = parts[1].strip().strip('"')
+                        streaming_text += content
+                        yield content if raw else dict(text=streaming_text)
+            self.last_response.update(dict(text=streaming_text))
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """
+        Generates a response from the Chatify API.
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """
+        Extracts the message from the API response.
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+# Example usage
+if __name__ == "__main__":
+    from rich import print
+
+    ai = Chatify()
+    response = ai.chat(input(">>> "))
+    for chunk in response:
+        print(chunk, end="", flush=True)
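The new file's own __main__ block shows the non-streaming path; a short, hedged sketch of the streaming path follows. The import path is assumed from the file location webscout/Provider/Chatify.py, and chat(stream=True) yields message text per chunk as defined in the ask()/chat() pair above.

# Streaming usage sketch (assumed import path; not part of the release diff)
from webscout.Provider.Chatify import Chatify

ai = Chatify(system_prompt="You are a helpful and informative AI assistant.")
for chunk in ai.chat("Explain HTTP keep-alive in one paragraph.", stream=True):
    print(chunk, end="", flush=True)  # each chunk is the accumulated text extracted by get_message()
print()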
webscout/Provider/NetFly.py
CHANGED

@@ -1,32 +1,16 @@
-import time
-import uuid
-from selenium import webdriver
-from selenium.webdriver.chrome.options import Options
-from selenium.webdriver.common.by import By
-from selenium.webdriver.support import expected_conditions as EC
-from selenium.webdriver.support.ui import WebDriverWait
-import click
 import requests
-
-from uuid import uuid4
-from re import findall
-from requests.exceptions import RequestException
-from curl_cffi.requests import get, RequestsError
-import g4f
+
 from random import randint
-
-import io
-import re
+
 import json
-
+
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
 from typing import Any, AsyncGenerator, Dict
-
-import httpx
+
 
 class NetFly(Provider):
     """

@@ -121,18 +105,6 @@ class NetFly(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> dict:
-        """Chat with AI
-
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Whether to stream the response. Defaults to False.
-            raw (bool, optional): Whether to return the raw response. Defaults to False.
-            optimizer (str, optional): The name of the optimizer to use. Defaults to None.
-            conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
-
-        Returns:
-            The response from the API.
-        """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:

@@ -166,7 +138,8 @@ class NetFly(Provider):
                 raise exceptions.FailedToGenerateResponseError(
                     f"Failed to generate response - ({response.status_code}, {response.reason})"
                 )
-
+
+            full_response = ""
             for line in response.iter_lines(decode_unicode=True):
                 if line:
                     if line.startswith("data: "):

@@ -175,30 +148,25 @@ class NetFly(Provider):
                             break
                         try:
                             data = json.loads(json_data)
-
-
-
-                            buffer += content
-                            # Check for completion marker (period in this case)
-                            if buffer.endswith(".") or buffer.endswith("\n"):
-                                yield buffer if raw else dict(text=buffer)
-                                buffer = ""  # Clear the buffer
+                            content = data["choices"][0]["delta"].get("content", "")
+                            full_response += content
+                            yield content if raw else dict(text=content)
                         except json.decoder.JSONDecodeError:
                             continue
 
-
-            if buffer:
-                yield buffer if raw else dict(text=buffer)
-
-            self.last_response.update(dict(text=buffer))
+            self.last_response.update(dict(text=full_response))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
             )
 
         def for_non_stream():
-
-
-
+            full_response = ""
+            for chunk in for_stream():
+                if isinstance(chunk, dict):
+                    full_response += chunk['text']
+                else:
+                    full_response += chunk
+            return dict(text=full_response)
 
         return for_stream() if stream else for_non_stream()
 

@@ -209,16 +177,6 @@ class NetFly(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> str:
-        """Generate response `str`
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            str: Response generated
-        """
-
         def for_stream():
             for response in self.ask(
                 prompt, True, optimizer=optimizer, conversationally=conversationally

@@ -248,9 +206,11 @@ class NetFly(Provider):
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
+
 if __name__ == '__main__':
     from rich import print
     ai = NetFly()
-    response = ai.chat("tell me about india")
+    response = ai.chat("tell me about india", stream=True)
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)
+    print()  # Add a newline at the end
webscout/Provider/RUBIKSAI.py
CHANGED

@@ -1,10 +1,10 @@
 import requests
 import json
 from typing import Any, Dict, Optional
-from
-from
-from
-from
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider
 from webscout import exceptions
 
 

@@ -198,4 +198,10 @@ class RUBIKSAI(Provider):
             str: Message extracted
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
+        return response["text"]
+if __name__ == '__main__':
+    from rich import print
+    ai = RUBIKSAI()
+    response = ai.chat(input(">>> "))
+    for chunk in response:
+        print(chunk, end="", flush=True)
webscout/Provider/TTS/streamElements.py
CHANGED

@@ -4,7 +4,7 @@ import requests
 import pathlib
 import urllib.parse
 from typing import Union, Generator
-
+from playsound import playsound
 from webscout import exceptions
 from webscout.AIbase import TTSProvider
 

@@ -267,7 +267,7 @@ class StreamElements(TTSProvider):
 
     def play_audio(self, filename: str):
         """
-        Plays an audio file using
+        Plays an audio file using playsound.
 
         Args:
             filename (str): The path to the audio file.

@@ -276,11 +276,7 @@ class StreamElements(TTSProvider):
             RuntimeError: If there is an error playing the audio.
         """
         try:
-
-            pygame.mixer.music.load(filename)
-            pygame.mixer.music.play()
-            while pygame.mixer.music.get_busy():
-                pygame.time.Clock().tick(10)
+            playsound(filename)
         except Exception as e:
             raise RuntimeError(f"Error playing audio: {e}")
 

@@ -293,4 +289,4 @@ if __name__ == "__main__":
     audio_file = streamelements.tts(text, voice="Brian")
 
     print("Playing audio...")
-    streamelements.play_audio(audio_file)
+    streamelements.play_audio(audio_file)
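For context, a minimal sketch of the playback path after the swap from pygame to playsound, mirroring the file's own __main__ block. The import path is assumed from the file location webscout/Provider/TTS/streamElements.py.

# Playback sketch (assumed import path; not part of the release diff)
from webscout.Provider.TTS.streamElements import StreamElements

streamelements = StreamElements()
audio_file = streamelements.tts("Hello from webscout 5.3", voice="Brian")  # returned value is used as the filename, as in the file's __main__
streamelements.play_audio(audio_file)  # now a single playsound(filename) call instead of the pygame loop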