webscout-6.4-py3-none-any.whl → webscout-6.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIutel.py +7 -54
- webscout/DWEBS.py +48 -26
- webscout/{YTdownloader.py → Extra/YTToolkit/YTdownloader.py} +990 -1103
- webscout/Extra/YTToolkit/__init__.py +3 -0
- webscout/{transcriber.py → Extra/YTToolkit/transcriber.py} +1 -1
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +45 -0
- webscout/Extra/YTToolkit/ytapi/https.py +88 -0
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
- webscout/Extra/YTToolkit/ytapi/query.py +37 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +60 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
- webscout/Extra/YTToolkit/ytapi/video.py +102 -0
- webscout/Extra/__init__.py +2 -1
- webscout/Extra/autocoder/autocoder_utiles.py +119 -101
- webscout/Extra/autocoder/rawdog.py +679 -680
- webscout/Extra/gguf.py +441 -441
- webscout/Extra/markdownlite/__init__.py +862 -0
- webscout/Extra/weather_ascii.py +2 -2
- webscout/Provider/AISEARCH/__init__.py +2 -0
- webscout/Provider/AISEARCH/ooai.py +155 -0
- webscout/Provider/Amigo.py +70 -85
- webscout/Provider/{prefind.py → Jadve.py} +72 -70
- webscout/Provider/Netwrck.py +235 -0
- webscout/Provider/Openai.py +4 -3
- webscout/Provider/PI.py +292 -221
- webscout/Provider/PizzaGPT.py +3 -3
- webscout/Provider/Reka.py +0 -1
- webscout/Provider/TTS/__init__.py +5 -1
- webscout/Provider/TTS/deepgram.py +183 -0
- webscout/Provider/TTS/elevenlabs.py +137 -0
- webscout/Provider/TTS/gesserit.py +151 -0
- webscout/Provider/TTS/murfai.py +139 -0
- webscout/Provider/TTS/parler.py +134 -107
- webscout/Provider/TTS/streamElements.py +360 -275
- webscout/Provider/TTS/utils.py +280 -0
- webscout/Provider/TTS/voicepod.py +116 -116
- webscout/Provider/TeachAnything.py +15 -2
- webscout/Provider/Youchat.py +42 -8
- webscout/Provider/__init__.py +8 -21
- webscout/Provider/meta.py +794 -779
- webscout/Provider/multichat.py +230 -0
- webscout/Provider/promptrefine.py +2 -2
- webscout/Provider/talkai.py +10 -13
- webscout/Provider/turboseek.py +5 -4
- webscout/Provider/tutorai.py +8 -112
- webscout/Provider/typegpt.py +5 -7
- webscout/Provider/x0gpt.py +81 -9
- webscout/Provider/yep.py +123 -361
- webscout/__init__.py +33 -28
- webscout/conversation.py +24 -9
- webscout/exceptions.py +188 -20
- webscout/litprinter/__init__.py +719 -831
- webscout/litprinter/colors.py +54 -0
- webscout/optimizers.py +420 -270
- webscout/prompt_manager.py +279 -279
- webscout/scout/__init__.py +8 -0
- webscout/scout/core/__init__.py +7 -0
- webscout/scout/core/crawler.py +140 -0
- webscout/scout/core/scout.py +571 -0
- webscout/scout/core/search_result.py +96 -0
- webscout/scout/core/text_analyzer.py +63 -0
- webscout/scout/core/text_utils.py +277 -0
- webscout/scout/core/web_analyzer.py +52 -0
- webscout/scout/core.py +884 -0
- webscout/scout/element.py +460 -0
- webscout/scout/parsers/__init__.py +69 -0
- webscout/scout/parsers/html5lib_parser.py +172 -0
- webscout/scout/parsers/html_parser.py +236 -0
- webscout/scout/parsers/lxml_parser.py +178 -0
- webscout/scout/utils.py +38 -0
- webscout/update_checker.py +184 -125
- webscout/version.py +1 -1
- webscout/zeroart/__init__.py +55 -0
- webscout/zeroart/base.py +60 -0
- webscout/zeroart/effects.py +99 -0
- webscout/zeroart/fonts.py +816 -0
- webscout/zerodir/__init__.py +225 -0
- {webscout-6.4.dist-info → webscout-6.6.dist-info}/METADATA +18 -231
- webscout-6.6.dist-info/RECORD +197 -0
- webscout-6.6.dist-info/top_level.txt +2 -0
- webstoken/__init__.py +30 -0
- webstoken/classifier.py +189 -0
- webstoken/keywords.py +216 -0
- webstoken/language.py +128 -0
- webstoken/ner.py +164 -0
- webstoken/normalizer.py +35 -0
- webstoken/processor.py +77 -0
- webstoken/sentiment.py +206 -0
- webstoken/stemmer.py +73 -0
- webstoken/t.py +75 -0
- webstoken/tagger.py +60 -0
- webstoken/tokenizer.py +158 -0
- webscout/Agents/Onlinesearcher.py +0 -182
- webscout/Agents/__init__.py +0 -2
- webscout/Agents/functioncall.py +0 -248
- webscout/Bing_search.py +0 -251
- webscout/Provider/Perplexity.py +0 -599
- webscout/Provider/RoboCoders.py +0 -206
- webscout/Provider/genspark.py +0 -225
- webscout/Provider/perplexitylabs.py +0 -265
- webscout/Provider/twitterclone.py +0 -251
- webscout/Provider/upstage.py +0 -230
- webscout/gpt4free.py +0 -666
- webscout/requestsHTMLfix.py +0 -775
- webscout/webai.py +0 -2590
- webscout-6.4.dist-info/RECORD +0 -154
- webscout-6.4.dist-info/top_level.txt +0 -1
- /webscout/Provider/{felo_search.py → AISEARCH/felo_search.py} +0 -0
- {webscout-6.4.dist-info → webscout-6.6.dist-info}/LICENSE.md +0 -0
- {webscout-6.4.dist-info → webscout-6.6.dist-info}/WHEEL +0 -0
- {webscout-6.4.dist-info → webscout-6.6.dist-info}/entry_points.txt +0 -0
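The headline structural change is a reorganization: the YouTube tooling moves under webscout/Extra/YTToolkit with a new ytapi subpackage, and the wheel now ships a second top-level package, webstoken (hence top_level.txt +2 -0). A minimal sketch of how imports shift, assuming the module paths mirror the file moves above; the names each __init__.py actually re-exports are not visible in this diff:

# webscout 6.4 (old layout):
#   from webscout.YTdownloader import ...
#   from webscout.transcriber import ...

# webscout 6.6 (new layout, inferred from the renames above):
from webscout.Extra.YTToolkit import YTdownloader, transcriber
from webscout.Extra.YTToolkit.ytapi import channel, playlist, video

import webstoken  # new top-level tokenizer/NLP package installed alongside webscout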
webscout/Provider/multichat.py
ADDED
@@ -0,0 +1,230 @@
+import requests
+import json
+from typing import Any, Dict, Optional, Generator
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+
+# Model configurations
+MODEL_CONFIGS = {
+    "llama": {
+        "endpoint": "https://www.multichatai.com/api/chat/meta",
+        "models": {
+            "llama-3.1-70b-versatile": {"contextLength": 8192},
+            "llama-3.2-90b-vision-preview": {"contextLength": 32768},
+            "llama-3.2-11b-vision-preview": {"contextLength": 32768},
+        },
+    },
+    "alibaba": {
+        "endpoint": "https://www.multichatai.com/api/chat/alibaba",
+        "models": {
+            "Qwen/Qwen2.5-72B-Instruct": {"contextLength": 32768},
+            "Qwen/Qwen2.5-Coder-32B-Instruct": {"contextLength": 32768},
+        },
+    },
+    "cohere": {
+        "endpoint": "https://www.multichatai.com/api/chat/cohere",
+        "models": {"command-r": {"contextLength": 128000}},
+    },
+}
+
+class MultiChatAI(Provider):
+    """
+    A class to interact with the MultiChatAI API.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 4000,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "llama-3.1-70b-versatile",  # Default model
+        system_prompt: str = "You are a helpful assistant.",
+        temperature: float = 0.5,
+        presence_penalty: int = 0,
+        frequency_penalty: int = 0,
+        top_p: float = 1,
+    ):
+        """Initializes the MultiChatAI API client."""
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.system_prompt = system_prompt
+        self.temperature = temperature
+        self.presence_penalty = presence_penalty
+        self.frequency_penalty = frequency_penalty
+        self.top_p = top_p
+        self.headers = {
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9",
+            "content-type": "text/plain;charset=UTF-8",
+            "origin": "https://www.multichatai.com",
+            "referer": "https://www.multichatai.com/",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
+        }
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+        # Parse provider and model name
+        self.provider = "llama"  # Default provider
+        self.model_name = self.model
+
+        # Check if model exists in any provider
+        model_found = False
+        for provider, config in MODEL_CONFIGS.items():
+            if self.model in config["models"]:
+                self.provider = provider
+                self.model_name = self.model
+                model_found = True
+                break
+
+        if not model_found:
+            available_models = []
+            for provider, config in MODEL_CONFIGS.items():
+                for model in config["models"].keys():
+                    available_models.append(f"{provider}/{model}")
+            raise ValueError(
+                f"Invalid model: {self.model}\nAvailable models: {', '.join(available_models)}"
+            )
+
+    def _get_endpoint(self) -> str:
+        """Get the API endpoint for the current provider."""
+        return MODEL_CONFIGS[self.provider]["endpoint"]
+
+    def _get_chat_settings(self) -> Dict[str, Any]:
+        """Get chat settings for the current model."""
+        base_settings = MODEL_CONFIGS[self.provider]["models"][self.model_name]
+        return {
+            "model": self.model,
+            "prompt": self.system_prompt,
+            "temperature": self.temperature,
+            "contextLength": base_settings["contextLength"],
+            "includeProfileContext": True,
+            "includeWorkspaceInstructions": True,
+            "embeddingsProvider": "openai"
+        }
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any] | Generator:
+        """Sends a prompt to the MultiChatAI API and returns the response."""
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "chatSettings": self._get_chat_settings(),
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt},
+            ],
+            "customModelId": "",
+        }
+
+        try:
+            response = self.session.post(
+                self._get_endpoint(),
+                headers=self.headers,
+                json=payload,
+                stream=True,
+                timeout=self.timeout,
+            )
+            response.raise_for_status()
+
+            full_response = ""
+            for line in response.iter_lines():
+                if line:
+                    decoded_line = line.decode("utf-8")
+                    if stream:
+                        yield {"text": decoded_line}
+                    full_response += decoded_line
+
+            self.last_response = {"text": full_response.strip()}
+            self.conversation.update_chat_history(prompt, full_response.strip())
+
+            if not stream:
+                return self.last_response
+
+        except requests.exceptions.RequestException as e:
+            raise exceptions.ProviderConnectionError(f"API request failed: {e}") from e
+        except json.JSONDecodeError as e:
+            raise exceptions.InvalidResponseError(f"Invalid JSON response: {e}") from e
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"Unexpected error: {e}") from e
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str | Generator[str, None, None]:
+        """Generate response."""
+        if stream:
+            for chunk in self.ask(
+                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
+            ):
+                if isinstance(chunk, dict):
+                    yield chunk.get("text", "")
+                else:
+                    yield str(chunk)
+        else:
+            response = self.ask(
+                prompt, stream=False, optimizer=optimizer, conversationally=conversationally
+            )
+            return response.get("text", "") if isinstance(response, dict) else str(response)
+
+    def get_message(self, response: Dict[str, Any] | str) -> str:
+        """Retrieves message from response."""
+        if isinstance(response, dict):
+            return response.get("text", "")
+        return str(response)
+
+if __name__ == "__main__":
+    from rich import print
+
+    ai = MultiChatAI(model="llama-3.1-70b-versatile")
+    response = ai.chat("What is the meaning of life?", stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
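The new provider's own __main__ block shows the intended usage. Because chat() contains a yield, it always returns a generator, so the streaming form is the reliable way to consume it. A minimal sketch, assuming webscout 6.6 is installed (whether MultiChatAI is also re-exported from webscout.Provider is not visible in this diff):

from webscout.Provider.multichat import MultiChatAI

# Any model key from MODEL_CONFIGS works here, e.g. a Qwen or command-r model.
ai = MultiChatAI(model="Qwen/Qwen2.5-72B-Instruct")
for chunk in ai.chat("What is the meaning of life?", stream=True):
    print(chunk, end="", flush=True)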
webscout/Provider/promptrefine.py
CHANGED
@@ -6,7 +6,7 @@ from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
-from
+from webscout import LitAgent as UserAgent
 
 class PromptRefine(Provider):
     """
@@ -55,7 +55,7 @@ class PromptRefine(Provider):
         self.headers = {
             'origin': 'https://www.promptrefine.com',
             'referer': 'https://www.promptrefine.com/prompt/new',
-            'user-agent': UserAgent().random
+            'user-agent': UserAgent().random()
         }
 
         self.__available_optimizers = (
webscout/Provider/talkai.py
CHANGED
@@ -1,5 +1,5 @@
 import uuid
-import
+import cloudscraper
 import json
 from typing import Any, Dict, Optional, Generator
 
@@ -8,7 +8,7 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-
+from webscout.litagent import LitAgent
 class Talkai(Provider):
     """
     A class to interact with the Talkai.info API.
@@ -30,7 +30,7 @@
         """
         Initializes the Talkai.info API with given parameters.
         """
-        self.session =
+        self.session = cloudscraper.create_scraper()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_endpoint = "https://talkai.info/chat/send/"
@@ -43,9 +43,8 @@
             'Content-Type': 'application/json',
             'Origin': 'https://talkai.info',
             'Referer': 'https://talkai.info/chat/',
-            'User-Agent':
-            '
-            'sec-ch-ua-platform': '"Windows"'
+            'User-Agent': LitAgent().random(),
+            'Cookie': '_csrf-front=e19e203a958c74e439261f6860535403324c9ab2ede76449e6407e54e1f366afa%3A2%3A%7Bi%3A0%3Bs%3A11%3A%22_csrf-front%22%3Bi%3A1%3Bs%3A32%3A%22QbnGY7XS5q9i3JnDvi6KRzrOk0D6XFnk%22%3B%7D; _ga=GA1.1.1383924142.1734246140; _ym_uid=1723397035198647017; _ym_d=1734246141; _ym_isad=1; _ym_visorc=b; talkai-front=ngbj23of1t0ujg2raoa3l57vqe; _ga_FB7V9WMN30=GS1.1.1734246139.1.1734246143.0.0.0'
         }
         self.__available_optimizers = (
             method
@@ -87,9 +86,7 @@ class Talkai(Provider):
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
+                conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
             else:
                 raise exceptions.FailedToGenerateResponseError(
                     f"Optimizer is not one of {self.__available_optimizers}"
@@ -111,7 +108,7 @@ class Talkai(Provider):
 
         def for_stream():
             try:
-                with
+                with self.session.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
                     response.raise_for_status()
 
                     full_response = ""
@@ -120,7 +117,7 @@ class Talkai(Provider):
                         decoded_line = line.decode('utf-8')
                         if 'event: trylimit' in decoded_line:
                             break  # Stop if trylimit event is encountered
-                        if decoded_line.startswith('data:
+                        if decoded_line.startswith('data:'):
                             data = decoded_line[6:]  # Remove 'data: ' prefix
                             full_response += data
                             yield data if raw else dict(text=data)
@@ -130,7 +127,7 @@ class Talkai(Provider):
                     prompt, self.get_message(self.last_response)
                 )
 
-            except
+            except cloudscraper.exceptions as e:
                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
 
         def for_non_stream():
@@ -193,4 +190,4 @@ if __name__ == "__main__":
     t = Talkai()
     resp = t.chat("write me about AI", stream=True)
     for chunk in resp:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)
webscout/Provider/turboseek.py
CHANGED
@@ -7,7 +7,7 @@ from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
 from typing import Any, AsyncGenerator, Dict
-
+from webscout.litagent import LitAgent
 
 class TurboSeek(Provider):
     """
@@ -65,7 +65,7 @@
             "sec-fetch-dest": "empty",
             "sec-fetch-mode": "cors",
             "sec-fetch-site": "same-origin",
-            "user-agent":
+            "user-agent": LitAgent().random(),
         }
 
         self.__available_optimizers = (
@@ -145,7 +145,7 @@
                     data = json.loads(value[6:].decode('utf-8'))  # Decode manually
                    if "text" in data:
                        streaming_text += data["text"]
-                        resp = dict(text=
+                        resp = dict(text=data["text"])
                        self.last_response.update(resp)
                        yield value if raw else resp
                except json.decoder.JSONDecodeError:
@@ -210,6 +210,7 @@
 if __name__ == '__main__':
     from rich import print
     ai = TurboSeek()
-    response = ai.chat("
+    response = ai.chat("hello buddy", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
+
webscout/Provider/tutorai.py
CHANGED
@@ -1,113 +1,15 @@
 import requests
 import os
-import time
 from typing import List, Optional
 from string import punctuation
 from random import choice
-from requests.exceptions import RequestException
 import json
-from html.parser import HTMLParser
-import re
-import html.entities
-
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-
-class TerminalFormatter(HTMLParser):
-    """
-    A custom HTML parser that converts HTML content to terminal-friendly formatted text
-    using ANSI escape codes.
-    """
-
-    def __init__(self):
-        super().__init__()
-        self.output = []
-        self.list_stack = []
-        self.ol_counters = []
-        self.bold = False
-        self.italic = False
-
-    def handle_starttag(self, tag, attrs):
-        if tag in ["strong", "b"]:
-            self.output.append("\033[1m")  # Bold
-            self.bold = True
-        elif tag in ["em", "i"]:
-            self.output.append("\033[3m")  # Italic
-            self.italic = True
-        elif tag == "br":
-            self.output.append("\n")
-        elif tag in ["p", "div", "h1", "h2", "h3", "h4", "h5", "h6"]:
-            self.output.append("\n")
-        elif tag == "ul":
-            self.list_stack.append("ul")
-            self.output.append("\n")
-        elif tag == "ol":
-            self.list_stack.append("ol")
-            self.ol_counters.append(1)
-            self.output.append("\n")
-        elif tag == "li":
-            if self.list_stack:
-                if self.list_stack[-1] == "ul":
-                    self.output.append("• ")  # Bullet point
-                elif self.list_stack[-1] == "ol":
-                    number = self.ol_counters[-1]
-                    self.output.append(f"{number}. ")
-                    self.ol_counters[-1] += 1
-
-    def handle_endtag(self, tag):
-        if tag in ["strong", "b"]:
-            self.output.append("\033[0m")  # Reset
-            self.bold = False
-        elif tag in ["em", "i"]:
-            self.output.append("\033[0m")  # Reset
-            self.italic = False
-        elif tag in ["p", "div", "h1", "h2", "h3", "h4", "h5", "h6"]:
-            self.output.append("\n")
-        elif tag == "ul":
-            if self.list_stack and self.list_stack[-1] == "ul":
-                self.list_stack.pop()
-            self.output.append("\n")
-        elif tag == "ol":
-            if self.list_stack and self.list_stack[-1] == "ol":
-                self.list_stack.pop()
-                self.ol_counters.pop()
-            self.output.append("\n")
-        elif tag == "li":
-            self.output.append("\n")
-
-    def handle_data(self, data):
-        # Remove ANSI escape codes from the data
-        data = re.sub(r'\033\[[0-9;]*m', '', data)
-        data = re.sub(r"\s+", " ", data)
-        self.output.append(data)
-
-    def handle_entityref(self, name):
-        entity = f"&{name};"
-        char = html.entities.name2codepoint.get(name, entity)
-        self.output.append(chr(char))
-
-    def handle_charref(self, name):
-        try:
-            if name.startswith("x") or name.startswith("X"):
-                char = chr(int(name[1:], 16))
-            else:
-                char = chr(int(name))
-            self.output.append(char)
-        except ValueError:
-            self.output.append(f"&#{name};")
-
-    def get_text(self):
-        return "".join(self.output).strip()
-
-
-def html_to_terminal(html_content):
-    parser = TerminalFormatter()
-    parser.feed(html_content)
-    return parser.get_text()
-
+from webscout.litagent import LitAgent
 
 class TutorAI(Provider):
     """
@@ -132,7 +34,7 @@ class TutorAI(Provider):
 
     Args:
         is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-        max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to
+        max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 1024.
         timeout (int, optional): Http request timeout. Defaults to 30.
         intro (str, optional): Conversation introductory prompt. Defaults to None.
         filepath (str, optional): Path to file containing conversation history. Defaults to None.
@@ -173,11 +75,7 @@ class TutorAI(Provider):
             "Sec-Fetch-Dest": "empty",
             "Sec-Fetch-Mode": "cors",
             "Sec-Fetch-Site": "same-origin",
-            "User-Agent": (
-                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
-                "AppleWebKit/537.36 (KHTML, like Gecko) "
-                "Chrome/129.0.0.0 Safari/537.36 Edg/128.0.0.0"
-            ),
+            "User-Agent": LitAgent().random()
         }
 
         self.__available_optimizers = (
@@ -229,9 +127,7 @@ class TutorAI(Provider):
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
+                conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
             else:
                 raise Exception(
                     f"Optimizer is not one of {self.__available_optimizers}"
@@ -268,7 +164,7 @@ class TutorAI(Provider):
             homeworkify_html = response_data.get("homeworkifyResponse", "")
             if not homeworkify_html:
                 raise exceptions.FailedToGenerateResponseError("\nNo 'homeworkifyResponse' found in the response.")
-            clean_text = html_to_terminal
+            clean_text = homeworkify_html  # Removed html_to_terminal call
             self.last_response.update(dict(text=clean_text))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
@@ -288,7 +184,7 @@ class TutorAI(Provider):
             homeworkify_html = response_data.get("homeworkifyResponse", "")
             if not homeworkify_html:
                 return {"text": "No content found in the response"}  # Default in case content not found
-            clean_text = html_to_terminal
+            clean_text = homeworkify_html  # Removed html_to_terminal call
 
             # Simulate streaming by yielding chunks of the content
             chunk_size = self.stream_chunk_size
@@ -349,6 +245,6 @@ if __name__ == "__main__":
     from rich import print
 
     ai = TutorAI()
-    response = ai.chat(
+    response = ai.chat("hello buddy", attachment_path=None)
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)
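With the TerminalFormatter class removed, TutorAI now stores the raw homeworkifyResponse HTML, so callers that want plain text must strip the markup themselves. A minimal stdlib sketch (the helper name html_to_text is hypothetical, not part of webscout):

from html.parser import HTMLParser

class _TextExtractor(HTMLParser):
    """Collects only the text nodes of an HTML document."""
    def __init__(self):
        super().__init__()
        self.parts = []

    def handle_data(self, data):
        self.parts.append(data)

def html_to_text(html_content: str) -> str:  # hypothetical helper, not in webscout
    parser = _TextExtractor()
    parser.feed(html_content)
    return "".join(parser.parts).strip()

print(html_to_text("<p>The answer is <strong>42</strong>.</p>"))  # The answer is 42.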
webscout/Provider/typegpt.py
CHANGED
@@ -7,7 +7,7 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-
+from webscout.litagent import LitAgent
 class TypeGPT(Provider):
     """
     A class to interact with the TypeGPT.net API. Improved to match webscout standards.
@@ -207,7 +207,6 @@ class TypeGPT(Provider):
         self.presence_penalty = presence_penalty
         self.frequency_penalty = frequency_penalty
         self.top_p = top_p
-
         self.headers = {
             "authority": "chat.typegpt.net",
             "accept": "application/json, text/event-stream",
@@ -215,7 +214,7 @@ class TypeGPT(Provider):
             "content-type": "application/json",
             "origin": "https://chat.typegpt.net",
             "referer": "https://chat.typegpt.net/",
-            "user-agent":
+            "user-agent": LitAgent().random()
         }
 
 
@@ -351,9 +350,8 @@ class TypeGPT(Provider):
 
 
 if __name__ == "__main__":
-
-
-
-    response = ai.chat(input(">>> "), stream=True)
+
+    ai = TypeGPT(model="claude-3-5-sonnet-20240620")
+    response = ai.chat("hi", stream=True)
     for chunks in response:
         print(chunks, end="", flush=True)
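One pattern recurs across the talkai, turboseek, tutorai, and typegpt hunks: hardcoded Mozilla/Chrome strings are replaced by webscout's bundled LitAgent helper, which hands out a randomized User-Agent per client instance. A minimal sketch of the pattern as the hunks use it (the surrounding header fields are illustrative):

from webscout.litagent import LitAgent

headers = {
    "content-type": "application/json",
    # .random() returns a fresh randomized browser User-Agent string
    "user-agent": LitAgent().random(),
}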
|