webscout: 7.0-py3-none-any.whl → 7.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout has been flagged as possibly problematic.
- webscout/AIauto.py +191 -191
- webscout/AIbase.py +122 -122
- webscout/AIutel.py +440 -440
- webscout/Bard.py +343 -161
- webscout/DWEBS.py +489 -492
- webscout/Extra/YTToolkit/YTdownloader.py +995 -995
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +476 -479
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +103 -103
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder_utiles.py +199 -199
- webscout/Extra/autocoder/rawdog.py +5 -7
- webscout/Extra/autollama.py +230 -230
- webscout/Extra/gguf.py +3 -3
- webscout/Extra/weather.py +171 -171
- webscout/LLM.py +442 -442
- webscout/Litlogger/__init__.py +67 -681
- webscout/Litlogger/core/__init__.py +6 -0
- webscout/Litlogger/core/level.py +20 -0
- webscout/Litlogger/core/logger.py +123 -0
- webscout/Litlogger/handlers/__init__.py +12 -0
- webscout/Litlogger/handlers/console.py +50 -0
- webscout/Litlogger/handlers/file.py +143 -0
- webscout/Litlogger/handlers/network.py +174 -0
- webscout/Litlogger/styles/__init__.py +7 -0
- webscout/Litlogger/styles/colors.py +231 -0
- webscout/Litlogger/styles/formats.py +377 -0
- webscout/Litlogger/styles/text.py +87 -0
- webscout/Litlogger/utils/__init__.py +6 -0
- webscout/Litlogger/utils/detectors.py +154 -0
- webscout/Litlogger/utils/formatters.py +200 -0
- webscout/Provider/AISEARCH/DeepFind.py +250 -250
- webscout/Provider/Blackboxai.py +136 -137
- webscout/Provider/ChatGPTGratis.py +226 -0
- webscout/Provider/Cloudflare.py +91 -78
- webscout/Provider/DeepSeek.py +218 -0
- webscout/Provider/Deepinfra.py +59 -35
- webscout/Provider/Free2GPT.py +131 -124
- webscout/Provider/Gemini.py +100 -115
- webscout/Provider/Glider.py +74 -59
- webscout/Provider/Groq.py +30 -18
- webscout/Provider/Jadve.py +108 -77
- webscout/Provider/Llama3.py +117 -94
- webscout/Provider/Marcus.py +191 -137
- webscout/Provider/Netwrck.py +62 -50
- webscout/Provider/PI.py +79 -124
- webscout/Provider/PizzaGPT.py +129 -83
- webscout/Provider/QwenLM.py +311 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
- webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
- webscout/Provider/TTI/Nexra/__init__.py +22 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
- webscout/Provider/TTI/artbit/__init__.py +22 -22
- webscout/Provider/TTI/artbit/async_artbit.py +184 -184
- webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
- webscout/Provider/TTI/blackbox/__init__.py +4 -4
- webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
- webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
- webscout/Provider/TTI/deepinfra/__init__.py +4 -4
- webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
- webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
- webscout/Provider/TTI/huggingface/__init__.py +22 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
- webscout/Provider/TTI/imgninza/__init__.py +4 -4
- webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
- webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
- webscout/Provider/TTI/talkai/__init__.py +4 -4
- webscout/Provider/TTI/talkai/async_talkai.py +229 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
- webscout/Provider/TTS/deepgram.py +182 -182
- webscout/Provider/TTS/elevenlabs.py +136 -136
- webscout/Provider/TTS/gesserit.py +150 -150
- webscout/Provider/TTS/murfai.py +138 -138
- webscout/Provider/TTS/parler.py +133 -134
- webscout/Provider/TTS/streamElements.py +360 -360
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TTS/voicepod.py +116 -116
- webscout/Provider/TextPollinationsAI.py +74 -47
- webscout/Provider/WiseCat.py +193 -0
- webscout/Provider/__init__.py +144 -136
- webscout/Provider/cerebras.py +242 -227
- webscout/Provider/chatglm.py +204 -204
- webscout/Provider/dgaf.py +67 -39
- webscout/Provider/gaurish.py +105 -66
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +223 -0
- webscout/Provider/hermes.py +218 -218
- webscout/Provider/llama3mitril.py +179 -179
- webscout/Provider/llamatutor.py +72 -62
- webscout/Provider/llmchat.py +60 -35
- webscout/Provider/meta.py +794 -794
- webscout/Provider/multichat.py +331 -230
- webscout/Provider/typegpt.py +359 -356
- webscout/Provider/yep.py +5 -5
- webscout/__main__.py +5 -5
- webscout/cli.py +319 -319
- webscout/conversation.py +241 -242
- webscout/exceptions.py +328 -328
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +2 -3
- webscout/litprinter/__init__.py +0 -58
- webscout/scout/__init__.py +8 -8
- webscout/scout/core.py +884 -884
- webscout/scout/element.py +459 -459
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +38 -38
- webscout/swiftcli/__init__.py +811 -811
- webscout/update_checker.py +2 -12
- webscout/version.py +1 -1
- webscout/webscout_search.py +1142 -1140
- webscout/webscout_search_async.py +635 -635
- webscout/zeroart/__init__.py +54 -54
- webscout/zeroart/base.py +60 -60
- webscout/zeroart/effects.py +99 -99
- webscout/zeroart/fonts.py +816 -816
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/METADATA +21 -28
- webscout-7.2.dist-info/RECORD +217 -0
- webstoken/__init__.py +30 -30
- webstoken/classifier.py +189 -189
- webstoken/keywords.py +216 -216
- webstoken/language.py +128 -128
- webstoken/ner.py +164 -164
- webstoken/normalizer.py +35 -35
- webstoken/processor.py +77 -77
- webstoken/sentiment.py +206 -206
- webstoken/stemmer.py +73 -73
- webstoken/tagger.py +60 -60
- webstoken/tokenizer.py +158 -158
- webscout/Provider/RUBIKSAI.py +0 -272
- webscout-7.0.dist-info/RECORD +0 -199
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/LICENSE.md +0 -0
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/WHEEL +0 -0
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/entry_points.txt +0 -0
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/top_level.txt +0 -0
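Since wheels are ordinary zip archives, a comparison like the one summarized above can be reproduced locally with nothing but the Python standard library. The sketch below is illustrative and not part of the diff service itself; it assumes both wheels have already been downloaded into the working directory under their standard PEP 427 file names:

import difflib
import zipfile

def wheel_lines(path: str, member: str) -> list[str]:
    # A wheel is a plain zip archive, so any member file can be read directly.
    with zipfile.ZipFile(path) as wheel:
        return wheel.read(member).decode("utf-8").splitlines(keepends=True)

old = wheel_lines("webscout-7.0-py3-none-any.whl", "webscout/Provider/llmchat.py")
new = wheel_lines("webscout-7.2-py3-none-any.whl", "webscout/Provider/llmchat.py")
print("".join(difflib.unified_diff(old, new, "7.0/llmchat.py", "7.2/llmchat.py")))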
webscout/Provider/llmchat.py
CHANGED
@@ -1,3 +1,4 @@
+
 import requests
 import json
 from typing import Any, Dict, Optional, Generator, List
@@ -7,10 +8,12 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
+from webscout.Litlogger import Logger, LogFormat
 from webscout import LitAgent as Lit
+
 class LLMChat(Provider):
     """
-    A class to interact with the LLMChat API.
+    A class to interact with the LLMChat API with comprehensive logging.
     """

     AVAILABLE_MODELS = [
@@ -33,13 +36,24 @@ class LLMChat(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "@cf/meta/llama-3.1-70b-instruct",
+        model: str = "@cf/meta/llama-3.1-70b-instruct",
         system_prompt: str = "You are a helpful assistant.",
+        logging: bool = False
     ):
         """
-        Initializes the LLMChat API with given parameters.
+        Initializes the LLMChat API with given parameters and logging capabilities.
         """
+        self.logger = Logger(
+            name="LLMChat",
+            format=LogFormat.MODERN_EMOJI,
+        ) if logging else None
+
+        if self.logger:
+            self.logger.info(f"Initializing LLMChat with model: {model}")
+
         if model not in self.AVAILABLE_MODELS:
+            if self.logger:
+                self.logger.error(f"Invalid model selected: {model}")
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

         self.session = requests.Session()
@@ -50,6 +64,7 @@ class LLMChat(Provider):
         self.last_response = {}
         self.model = model
         self.system_prompt = system_prompt
+
         self.headers = {
             "Content-Type": "application/json",
             "Accept": "*/*",
@@ -57,11 +72,13 @@ class LLMChat(Provider):
             "Origin": "https://llmchat.in",
             "Referer": "https://llmchat.in/"
         }
+
         self.__available_optimizers = (
             method
             for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
+
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -69,12 +86,16 @@ class LLMChat(Provider):
             if act
             else intro or Conversation.intro
         )
+
         self.conversation = Conversation(
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
         self.session.proxies = proxies

+        if self.logger:
+            self.logger.info("LLMChat initialized successfully")
+
     def ask(
         self,
         prompt: str,
@@ -83,24 +104,22 @@ class LLMChat(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Dict[str, Any]:
-        """Chat with LLMChat
-
-
-
-
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict: Response dictionary.
-        """
+        """Chat with LLMChat with logging capabilities"""
+        if self.logger:
+            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
+            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
+
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
+                if self.logger:
+                    self.logger.debug(f"Applied optimizer: {optimizer}")
             else:
+                if self.logger:
+                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
                 raise exceptions.FailedToGenerateResponseError(
                     f"Optimizer is not one of {self.__available_optimizers}"
                 )
@@ -117,8 +136,15 @@ class LLMChat(Provider):

         def for_stream():
             try:
+                if self.logger:
+                    self.logger.debug("Initiating streaming request to API")
+
                 with requests.post(url, json=payload, headers=self.headers, stream=True, timeout=self.timeout) as response:
                     response.raise_for_status()
+
+                    if self.logger:
+                        self.logger.info(f"API connection established successfully. Status: {response.status_code}")
+
                     full_response = ""
                     for line in response.iter_lines():
                         if line:
@@ -132,19 +158,31 @@ class LLMChat(Provider):
                                 yield response_text if raw else dict(text=response_text)
                             except json.JSONDecodeError:
                                 if line.strip() != 'data: [DONE]':
-
+                                    if self.logger:
+                                        self.logger.warning(f"Failed to parse line: {line}")
                                 continue
+
                     self.last_response.update(dict(text=full_response))
                     self.conversation.update_chat_history(
                         prompt, self.get_message(self.last_response)
                     )
+
             except requests.exceptions.RequestException as e:
+                if self.logger:
+                    self.logger.error(f"API request failed: {str(e)}")
                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")

         def for_non_stream():
+            if self.logger:
+                self.logger.debug("Processing non-streaming request")
+
             full_response = ""
             for line in for_stream():
                 full_response += line['text'] if not raw else line
+
+            if self.logger:
+                self.logger.debug("Response processing completed")
+
             return dict(text=full_response)

         return for_stream() if stream else for_non_stream()
@@ -156,15 +194,9 @@ class LLMChat(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> str | Generator[str, None, None]:
-        """Generate response
-
-
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            str: Response generated
-        """
+        """Generate response with logging capabilities"""
+        if self.logger:
+            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")

         def for_stream():
             for response in self.ask(
@@ -185,21 +217,14 @@ class LLMChat(Provider):
         return for_stream() if stream else for_non_stream()

     def get_message(self, response: Dict[str, Any]) -> str:
-        """Retrieves message
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted.
-        """
+        """Retrieves message from response with validation"""
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]

-
 if __name__ == "__main__":
     from rich import print
-
+    # Enable logging for testing
+    ai = LLMChat(model='@cf/meta/llama-3.1-70b-instruct', logging=True)
     response = ai.chat("What's the meaning of life?", stream=True)
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)