webscout 6.4__py3-none-any.whl → 6.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout has been flagged as potentially problematic; see the registry's advisory page for more details.
- webscout/AIutel.py +7 -54
- webscout/DWEBS.py +48 -26
- webscout/{YTdownloader.py → Extra/YTToolkit/YTdownloader.py} +990 -1103
- webscout/Extra/YTToolkit/__init__.py +3 -0
- webscout/{transcriber.py → Extra/YTToolkit/transcriber.py} +1 -1
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +45 -0
- webscout/Extra/YTToolkit/ytapi/https.py +88 -0
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
- webscout/Extra/YTToolkit/ytapi/query.py +37 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +60 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
- webscout/Extra/YTToolkit/ytapi/video.py +102 -0
- webscout/Extra/__init__.py +2 -1
- webscout/Extra/autocoder/autocoder_utiles.py +119 -101
- webscout/Extra/autocoder/rawdog.py +679 -680
- webscout/Extra/gguf.py +441 -441
- webscout/Extra/markdownlite/__init__.py +862 -0
- webscout/Extra/weather_ascii.py +2 -2
- webscout/Provider/AISEARCH/__init__.py +2 -0
- webscout/Provider/AISEARCH/ooai.py +155 -0
- webscout/Provider/Amigo.py +70 -85
- webscout/Provider/{prefind.py → Jadve.py} +72 -70
- webscout/Provider/Netwrck.py +235 -0
- webscout/Provider/Openai.py +4 -3
- webscout/Provider/PI.py +292 -221
- webscout/Provider/PizzaGPT.py +3 -3
- webscout/Provider/Reka.py +0 -1
- webscout/Provider/TTS/__init__.py +5 -1
- webscout/Provider/TTS/deepgram.py +183 -0
- webscout/Provider/TTS/elevenlabs.py +137 -0
- webscout/Provider/TTS/gesserit.py +151 -0
- webscout/Provider/TTS/murfai.py +139 -0
- webscout/Provider/TTS/parler.py +134 -107
- webscout/Provider/TTS/streamElements.py +360 -275
- webscout/Provider/TTS/utils.py +280 -0
- webscout/Provider/TTS/voicepod.py +116 -116
- webscout/Provider/TeachAnything.py +15 -2
- webscout/Provider/Youchat.py +42 -8
- webscout/Provider/__init__.py +8 -21
- webscout/Provider/meta.py +794 -779
- webscout/Provider/multichat.py +230 -0
- webscout/Provider/promptrefine.py +2 -2
- webscout/Provider/talkai.py +10 -13
- webscout/Provider/turboseek.py +5 -4
- webscout/Provider/tutorai.py +8 -112
- webscout/Provider/typegpt.py +5 -7
- webscout/Provider/x0gpt.py +81 -9
- webscout/Provider/yep.py +123 -361
- webscout/__init__.py +33 -28
- webscout/conversation.py +24 -9
- webscout/exceptions.py +188 -20
- webscout/litprinter/__init__.py +719 -831
- webscout/litprinter/colors.py +54 -0
- webscout/optimizers.py +420 -270
- webscout/prompt_manager.py +279 -279
- webscout/scout/__init__.py +8 -0
- webscout/scout/core/__init__.py +7 -0
- webscout/scout/core/crawler.py +140 -0
- webscout/scout/core/scout.py +571 -0
- webscout/scout/core/search_result.py +96 -0
- webscout/scout/core/text_analyzer.py +63 -0
- webscout/scout/core/text_utils.py +277 -0
- webscout/scout/core/web_analyzer.py +52 -0
- webscout/scout/core.py +884 -0
- webscout/scout/element.py +460 -0
- webscout/scout/parsers/__init__.py +69 -0
- webscout/scout/parsers/html5lib_parser.py +172 -0
- webscout/scout/parsers/html_parser.py +236 -0
- webscout/scout/parsers/lxml_parser.py +178 -0
- webscout/scout/utils.py +38 -0
- webscout/update_checker.py +184 -125
- webscout/version.py +1 -1
- webscout/zeroart/__init__.py +55 -0
- webscout/zeroart/base.py +60 -0
- webscout/zeroart/effects.py +99 -0
- webscout/zeroart/fonts.py +816 -0
- webscout/zerodir/__init__.py +225 -0
- {webscout-6.4.dist-info → webscout-6.6.dist-info}/METADATA +18 -231
- webscout-6.6.dist-info/RECORD +197 -0
- webscout-6.6.dist-info/top_level.txt +2 -0
- webstoken/__init__.py +30 -0
- webstoken/classifier.py +189 -0
- webstoken/keywords.py +216 -0
- webstoken/language.py +128 -0
- webstoken/ner.py +164 -0
- webstoken/normalizer.py +35 -0
- webstoken/processor.py +77 -0
- webstoken/sentiment.py +206 -0
- webstoken/stemmer.py +73 -0
- webstoken/t.py +75 -0
- webstoken/tagger.py +60 -0
- webstoken/tokenizer.py +158 -0
- webscout/Agents/Onlinesearcher.py +0 -182
- webscout/Agents/__init__.py +0 -2
- webscout/Agents/functioncall.py +0 -248
- webscout/Bing_search.py +0 -251
- webscout/Provider/Perplexity.py +0 -599
- webscout/Provider/RoboCoders.py +0 -206
- webscout/Provider/genspark.py +0 -225
- webscout/Provider/perplexitylabs.py +0 -265
- webscout/Provider/twitterclone.py +0 -251
- webscout/Provider/upstage.py +0 -230
- webscout/gpt4free.py +0 -666
- webscout/requestsHTMLfix.py +0 -775
- webscout/webai.py +0 -2590
- webscout-6.4.dist-info/RECORD +0 -154
- webscout-6.4.dist-info/top_level.txt +0 -1
- /webscout/Provider/{felo_search.py → AISEARCH/felo_search.py} +0 -0
- {webscout-6.4.dist-info → webscout-6.6.dist-info}/LICENSE.md +0 -0
- {webscout-6.4.dist-info → webscout-6.6.dist-info}/WHEEL +0 -0
- {webscout-6.4.dist-info → webscout-6.6.dist-info}/entry_points.txt +0 -0
webscout/Provider/PI.py
CHANGED
|
@@ -1,222 +1,293 @@
|
|
|
1
|
-
import cloudscraper
|
|
2
|
-
import json
|
|
3
|
-
|
|
4
|
-
import
|
|
5
|
-
|
|
6
|
-
from webscout.AIutel import
|
|
7
|
-
from webscout.AIutel import
|
|
8
|
-
from webscout.
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
self.
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
self.
|
|
86
|
-
self.
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
|
|
170
|
-
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
"""
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
1
|
+
import cloudscraper
|
|
2
|
+
import json
|
|
3
|
+
import re
|
|
4
|
+
import threading
|
|
5
|
+
import requests
|
|
6
|
+
from webscout.AIutel import Optimizers
|
|
7
|
+
from webscout.AIutel import Conversation
|
|
8
|
+
from webscout.AIutel import AwesomePrompts
|
|
9
|
+
from webscout.AIbase import Provider
|
|
10
|
+
from typing import Dict
|
|
11
|
+
from webscout import LitAgent
|
|
12
|
+
class PiAI(Provider):
    """PiAI class for interacting with the Pi.ai chat API, extending the Provider class.

    This class provides methods for sending messages to the Pi.ai chat API and receiving responses,
    enabling conversational interactions. It supports various configurations such as conversation mode,
    token limits, and history management.

    Attributes:
        scraper (cloudscraper.CloudScraper): The scraper instance for handling HTTP requests.
        url (str): The API endpoint for the Pi.ai chat service.
        AVAILABLE_VOICES (Dict[str, int]): A dictionary mapping voice names to their corresponding IDs.
        headers (Dict[str, str]): The headers to be used in HTTP requests to the API.
    """

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 600,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = None,
        history_offset: int = 10250,
        act: str = None,
    ):
        """Initializes the PiAI class for interacting with the Pi.ai chat API.

        Args:
            is_conversation (bool, optional): Flag for enabling conversational mode. Defaults to True.
            max_tokens (int, optional): Maximum number of tokens to generate in the response. Defaults to 600.
            timeout (int, optional): Timeout duration for HTTP requests in seconds. Defaults to 30.
            intro (str, optional): Introductory prompt for the conversation. Defaults to None.
            filepath (str, optional): Path to a file for storing conversation history. Defaults to None.
            update_file (bool, optional): Indicates whether to update the file with new prompts and responses. Defaults to True.
            proxies (dict, optional): Dictionary of HTTP request proxies. Defaults to None (no proxies).
            history_offset (int, optional): Number of last messages to retain in conversation history. Defaults to 10250.
            act (str|int, optional): Key or index for selecting an awesome prompt to use as an intro. Defaults to None.
        """
        # Fixed: a mutable-dict default argument is shared across calls; use a
        # None sentinel and build a fresh dict per instance instead.
        if proxies is None:
            proxies = {}
        self.scraper = cloudscraper.create_scraper()
        self.url = 'https://pi.ai/api/chat'
        # Voice name -> numeric voice id consumed by the /api/chat/voice endpoint.
        # Fixed: annotated Dict[str, int] — the values are ints (was Dict[str, str],
        # contradicting the class docstring).
        self.AVAILABLE_VOICES: Dict[str, int] = {
            "William": 1,
            "Samantha": 2,
            "Peter": 3,
            "Amy": 4,
            "Alice": 5,
            "Harry": 6
        }
        self.headers = {
            'Accept': 'text/event-stream',
            'Accept-Encoding': 'gzip, deflate, br, zstd',
            'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
            'Content-Type': 'application/json',
            'DNT': '1',
            'Origin': 'https://pi.ai',
            'Referer': 'https://pi.ai/talk',
            'Sec-CH-UA': '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
            'Sec-CH-UA-Mobile': '?0',
            'Sec-CH-UA-Platform': '"Windows"',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-origin',
            'User-Agent': LitAgent().random(),
            'X-Api-Version': '3'
        }
        # NOTE(review): these session/__cf_bm cookie values look like captured
        # browser tokens and will eventually expire — confirm they are refreshed
        # upstream or replaced with a proper session bootstrap.
        self.cookies = {
            '__Host-session': 'Ca5SoyAMJEaaB79jj1T69',
            '__cf_bm': 'g07oaL0jcstNfKDyZv7_YFjN0jnuBZjbMiXOWhy7V7A-1723536536-1.0.1.1-xwukd03L7oIAUqPG.OHbFNatDdHGZ28mRGsbsqfjBlpuy.b8w6UZIk8F3knMhhtNzwo4JQhBVdtYOlG0MvAw8A'
        }

        self.session = requests.Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.stream_chunk_size = 64
        self.timeout = timeout
        self.last_response = {}
        self.conversation_id = None

        # Fixed: this was a bare generator expression, which is exhausted after
        # the first membership test in ask() (and renders uselessly in the error
        # message). A tuple supports repeated `in` checks and a readable repr.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        self.session.headers.update(self.headers)
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        self.session.proxies = proxies
        # Initialize conversation ID
        if self.is_conversation:
            self.start_conversation()

    def start_conversation(self) -> str:
        """Starts a fresh conversation with the Pi.ai API and stores its sid.

        Returns:
            str: The conversation sid returned by the API.
        """
        response = self.scraper.post(
            "https://pi.ai/api/chat/start",
            headers=self.headers,
            cookies=self.cookies,
            json={},
            timeout=self.timeout
        )
        data = response.json()
        # The start endpoint returns a list of conversations; the first entry's
        # sid identifies the conversation for subsequent /api/chat posts.
        self.conversation_id = data['conversations'][0]['sid']
        return self.conversation_id

    def ask(
        self,
        prompt: str,
        voice_name: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        verbose: bool = None,
        output_file: str = None
    ) -> dict:
        """Interact with the AI by sending a prompt and receiving a response.

        Args:
            prompt (str): The prompt to be sent to the AI.
            voice_name (str): The name of the voice to use for audio responses.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            raw (bool, optional): If True, returns the raw response as received. Defaults to False.
            optimizer (str, optional): Name of the prompt optimizer to use - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): If True, chat conversationally when using optimizer. Defaults to False.
            verbose (bool, optional): If True, provides detailed output. Defaults to None.
            output_file (str, optional): File path to save the output. Defaults to None.

        Returns:
            dict: A dictionary containing the AI's response.
            ```json
            {
                "text": "How may I assist you today?"
            }
            ```
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        data = {
            'text': conversation_prompt,
            'conversation': self.conversation_id
        }

        def for_stream():
            response = self.scraper.post(self.url, headers=self.headers, cookies=self.cookies, json=data, stream=True, timeout=self.timeout)
            # NOTE(review): touching response.content consumes the entire body,
            # so iter_lines() below replays the cached payload rather than
            # streaming live from the socket — confirm whether true incremental
            # streaming is intended here.
            output_str = response.content.decode('utf-8')
            sids = re.findall(r'"sid":"(.*?)"', output_str)
            # presumably the second sid belongs to the AI's reply message and is
            # what the voice endpoint expects — verify against the API payload.
            second_sid = sids[1] if len(sids) >= 2 else None
            # Start the audio download in a separate thread
            threading.Thread(target=self.download_audio_threaded, args=(voice_name, second_sid, verbose, output_file)).start()

            streaming_text = ""
            for line in response.iter_lines(decode_unicode=True):
                if line.startswith("data: "):
                    json_data = line[6:]
                    try:
                        parsed_data = json.loads(json_data)
                        if 'text' in parsed_data:
                            streaming_text += parsed_data['text']
                            resp = dict(text=streaming_text)
                            self.last_response.update(resp)
                            yield parsed_data if raw else resp
                    except json.JSONDecodeError:
                        # Non-JSON SSE payloads (keep-alives etc.) are skipped.
                        continue
            self.conversation.update_chat_history(
                prompt, self.get_message(self.last_response)
            )

        def for_non_stream():
            # Drain the generator so last_response accumulates the full text.
            for _ in for_stream():
                pass
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        voice_name: str = "Alice",
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        verbose: bool = True,
        output_file: str = "PiAi.mp3"
    ) -> str:
        """Generates a response based on the provided prompt.

        Args:
            prompt (str): The input prompt to be sent for generating a response.
            voice_name (str, optional): The name of the voice to use for the response. Defaults to "Alice".
            stream (bool, optional): Flag for streaming the response. Defaults to False.
            optimizer (str, optional): The name of the prompt optimizer to use - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Indicates whether to chat conversationally when using the optimizer. Defaults to False.
            verbose (bool, optional): Flag to indicate if verbose output is desired. Defaults to True.
            output_file (str, optional): The file path where the audio will be saved. Defaults to "PiAi.mp3".

        Returns:
            str: The generated response.
        """
        # Kept as an assert so existing callers catching AssertionError still
        # work; note asserts are stripped under `python -O`.
        assert (
            voice_name in self.AVAILABLE_VOICES
        ), f"Voice '{voice_name}' not one of [{', '.join(self.AVAILABLE_VOICES.keys())}]"

        def for_stream():
            for response in self.ask(
                prompt, voice_name, True, optimizer=optimizer, conversationally=conversationally,
                verbose=verbose,
                output_file=output_file
            ):
                yield self.get_message(response).encode('utf-8').decode('utf-8')

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt,
                    voice_name,
                    False,
                    optimizer=optimizer,
                    conversationally=conversationally,
                    verbose=verbose,
                    output_file=output_file
                )
            ).encode('utf-8').decode('utf-8')

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Retrieves message only from response

        Args:
            response (dict): Response generated by `self.ask`

        Returns:
            str: Message extracted
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]

    def download_audio_threaded(self, voice_name: str, second_sid: str, verbose: bool, output_file: str) -> None:
        """Downloads audio in a separate thread.

        Args:
            voice_name (str): The name of the desired voice.
            second_sid (str): The message SID for the audio request.
            verbose (bool): Flag to indicate if verbose output is desired.
            output_file (str): The file path where the audio will be saved.
        """
        params = {
            'mode': 'eager',
            'voice': f'voice{self.AVAILABLE_VOICES[voice_name]}',
            'messageSid': second_sid,
        }
        try:
            audio_response = self.scraper.get('https://pi.ai/api/chat/voice', params=params, cookies=self.cookies, headers=self.headers, timeout=self.timeout)
            audio_response.raise_for_status()  # Raise an exception for bad status codes
            with open(output_file, "wb") as file:
                file.write(audio_response.content)
            if verbose:
                print("\nAudio file downloaded successfully.")
        except requests.exceptions.RequestException as e:
            # Best-effort: audio failure must not kill the chat thread.
            if verbose:
                print(f"\nFailed to download audio file. Error: {e}")
|
|
287
|
+
|
|
288
|
+
if __name__ == '__main__':
    # Manual smoke test: read one prompt from stdin, stream the reply chunks
    # to stdout as they arrive. Performs live network I/O against pi.ai.
    from rich import print
    ai = PiAI()
    # verbose=False suppresses the audio-download status messages printed by
    # the background voice thread.
    response = ai.chat(input(">>> "), stream=True, verbose=False)
    for chunk in response:
        # Each chunk is the cumulative text so far; print without newline so
        # the output reads as a continuous stream.
        print(chunk, end="", flush=True)
|
webscout/Provider/PizzaGPT.py
CHANGED
|
@@ -7,7 +7,7 @@ from webscout.AIutel import Conversation
|
|
|
7
7
|
from webscout.AIutel import AwesomePrompts, sanitize_stream
|
|
8
8
|
from webscout.AIbase import Provider, AsyncProvider
|
|
9
9
|
from webscout import exceptions
|
|
10
|
-
|
|
10
|
+
from webscout import LitAgent as Lit
|
|
11
11
|
|
|
12
12
|
class PIZZAGPT(Provider):
|
|
13
13
|
"""
|
|
@@ -63,7 +63,7 @@ class PIZZAGPT(Provider):
|
|
|
63
63
|
"sec-fetch-dest": "empty",
|
|
64
64
|
"sec-fetch-mode": "cors",
|
|
65
65
|
"sec-fetch-site": "same-origin",
|
|
66
|
-
"user-agent":
|
|
66
|
+
"user-agent": Lit().random(),
|
|
67
67
|
"x-secret": "Marinara"
|
|
68
68
|
}
|
|
69
69
|
|
|
@@ -133,7 +133,7 @@ class PIZZAGPT(Provider):
|
|
|
133
133
|
)
|
|
134
134
|
|
|
135
135
|
resp = response.json()
|
|
136
|
-
self.last_response.update(dict(text=resp['
|
|
136
|
+
self.last_response.update(dict(text=resp['content']))
|
|
137
137
|
self.conversation.update_chat_history(
|
|
138
138
|
prompt, self.get_message(self.last_response)
|
|
139
139
|
)
|
webscout/Provider/Reka.py
CHANGED