webscout 6.2b0-py3-none-any.whl → 6.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +191 -176
- webscout/AIbase.py +112 -239
- webscout/AIutel.py +488 -1130
- webscout/Agents/functioncall.py +248 -198
- webscout/Bing_search.py +250 -153
- webscout/DWEBS.py +454 -178
- webscout/Extra/__init__.py +2 -1
- webscout/Extra/autocoder/__init__.py +9 -0
- webscout/Extra/autocoder/autocoder_utiles.py +121 -0
- webscout/Extra/autocoder/rawdog.py +681 -0
- webscout/Extra/autollama.py +246 -195
- webscout/Extra/gguf.py +441 -226
- webscout/Extra/weather.py +172 -67
- webscout/LLM.py +442 -100
- webscout/Litlogger/__init__.py +681 -0
- webscout/Local/formats.py +4 -2
- webscout/Provider/Amigo.py +19 -10
- webscout/Provider/Andi.py +0 -33
- webscout/Provider/Blackboxai.py +4 -204
- webscout/Provider/DARKAI.py +1 -1
- webscout/Provider/EDITEE.py +1 -1
- webscout/Provider/Llama3.py +1 -1
- webscout/Provider/Marcus.py +137 -0
- webscout/Provider/NinjaChat.py +1 -1
- webscout/Provider/PI.py +221 -207
- webscout/Provider/Perplexity.py +598 -598
- webscout/Provider/RoboCoders.py +206 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -0
- webscout/Provider/TTI/AiForce/async_aiforce.py +257 -0
- webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -0
- webscout/Provider/TTI/Nexra/__init__.py +22 -0
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -0
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -0
- webscout/Provider/TTI/__init__.py +3 -4
- webscout/Provider/TTI/artbit/__init__.py +22 -0
- webscout/Provider/TTI/artbit/async_artbit.py +184 -0
- webscout/Provider/TTI/artbit/sync_artbit.py +176 -0
- webscout/Provider/TTI/blackbox/__init__.py +4 -0
- webscout/Provider/TTI/blackbox/async_blackbox.py +212 -0
- webscout/Provider/TTI/{blackboximage.py → blackbox/sync_blackbox.py} +199 -153
- webscout/Provider/TTI/deepinfra/__init__.py +4 -0
- webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -0
- webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -0
- webscout/Provider/TTI/huggingface/__init__.py +22 -0
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
- webscout/Provider/TTI/imgninza/__init__.py +4 -0
- webscout/Provider/TTI/imgninza/async_ninza.py +214 -0
- webscout/Provider/TTI/{imgninza.py → imgninza/sync_ninza.py} +209 -136
- webscout/Provider/TTI/talkai/__init__.py +4 -0
- webscout/Provider/TTI/talkai/async_talkai.py +229 -0
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
- webscout/Provider/__init__.py +146 -132
- webscout/Provider/askmyai.py +158 -0
- webscout/Provider/cerebras.py +227 -206
- webscout/Provider/geminiapi.py +208 -198
- webscout/Provider/llama3mitril.py +180 -0
- webscout/Provider/llmchat.py +203 -0
- webscout/Provider/mhystical.py +176 -0
- webscout/Provider/perplexitylabs.py +265 -0
- webscout/Provider/talkai.py +196 -0
- webscout/Provider/twitterclone.py +251 -244
- webscout/Provider/typegpt.py +359 -0
- webscout/__init__.py +28 -23
- webscout/__main__.py +5 -5
- webscout/cli.py +327 -347
- webscout/conversation.py +227 -0
- webscout/exceptions.py +161 -29
- webscout/litagent/__init__.py +172 -0
- webscout/litprinter/__init__.py +831 -0
- webscout/optimizers.py +270 -0
- webscout/prompt_manager.py +279 -0
- webscout/swiftcli/__init__.py +810 -0
- webscout/transcriber.py +479 -551
- webscout/update_checker.py +125 -0
- webscout/version.py +1 -1
- webscout-6.4.dist-info/LICENSE.md +211 -0
- {webscout-6.2b0.dist-info → webscout-6.4.dist-info}/METADATA +34 -55
- webscout-6.4.dist-info/RECORD +154 -0
- webscout/Provider/TTI/AIuncensored.py +0 -103
- webscout/Provider/TTI/Nexra.py +0 -120
- webscout/Provider/TTI/PollinationsAI.py +0 -138
- webscout/Provider/TTI/WebSimAI.py +0 -142
- webscout/Provider/TTI/aiforce.py +0 -160
- webscout/Provider/TTI/artbit.py +0 -141
- webscout/Provider/TTI/deepinfra.py +0 -148
- webscout/Provider/TTI/huggingface.py +0 -155
- webscout/models.py +0 -23
- webscout-6.2b0.dist-info/LICENSE.md +0 -50
- webscout-6.2b0.dist-info/RECORD +0 -118
- /webscout/{g4f.py → gpt4free.py} +0 -0
- {webscout-6.2b0.dist-info → webscout-6.4.dist-info}/WHEEL +0 -0
- {webscout-6.2b0.dist-info → webscout-6.4.dist-info}/entry_points.txt +0 -0
- {webscout-6.2b0.dist-info → webscout-6.4.dist-info}/top_level.txt +0 -0
webscout/conversation.py
ADDED
@@ -0,0 +1,227 @@

```python
"""
>>> from conversation import Conversation
>>> chat = Conversation(max_tokens=500)
>>> chat.add_message("user", "Hello!")
>>> chat.add_message("llm", "Hi there!")
>>> prompt = chat.gen_complete_prompt("What's up?")
"""

import os
import logging
from typing import Optional
from .Litlogger import LitLogger, LogFormat, ColorScheme

# Create a logger instance for this module
logger = LitLogger(
    name="Conversation",
    format=LogFormat.MODERN_EMOJI,
    color_scheme=ColorScheme.CYBERPUNK
)

class Conversation:
    """Handles prompt generation based on history and maintains chat context.

    This class is responsible for managing chat conversations, including:
    - Maintaining chat history
    - Loading/saving conversations from/to files
    - Generating prompts based on context
    - Managing token limits and history pruning

    Examples:
        >>> chat = Conversation(max_tokens=500)
        >>> chat.add_message("user", "Hello!")
        >>> chat.add_message("llm", "Hi there!")
        >>> prompt = chat.gen_complete_prompt("What's up?")
    """

    intro = (
        "You're a Large Language Model for chatting with people. "
        "Assume role of the LLM and give your response."
    )

    def __init__(
        self,
        status: bool = True,
        max_tokens: int = 600,
        filepath: Optional[str] = None,
        update_file: bool = True,
    ):
        """Initialize a new Conversation manager.

        Args:
            status (bool): Flag to control history tracking. Defaults to True.
            max_tokens (int): Maximum tokens for completion response. Defaults to 600.
            filepath (str, optional): Path to save/load conversation history. Defaults to None.
            update_file (bool): Whether to append new messages to file. Defaults to True.

        Examples:
            >>> chat = Conversation(max_tokens=500)
            >>> chat = Conversation(filepath="chat_history.txt")
        """
        self.status = status
        self.max_tokens_to_sample = max_tokens
        self.chat_history = ""  # Initialize as empty string
        self.history_format = "\nUser : %(user)s\nLLM :%(llm)s"
        self.file = filepath
        self.update_file = update_file
        self.history_offset = 10250
        self.prompt_allowance = 10

        if filepath:
            self.load_conversation(filepath, False)

    def load_conversation(self, filepath: str, exists: bool = True) -> None:
        """Load conversation history from a text file.

        Args:
            filepath (str): Path to the history file
            exists (bool): Flag for file availability. Defaults to True.

        Raises:
            AssertionError: If filepath is not str or file doesn't exist
        """
        assert isinstance(
            filepath, str
        ), f"Filepath needs to be of str datatype not {type(filepath)}"
        assert (
            os.path.isfile(filepath) if exists else True
        ), f"File '{filepath}' does not exist"

        if not os.path.isfile(filepath):
            logging.debug(f"Creating new chat-history file - '{filepath}'")
            with open(filepath, "w", encoding="utf-8") as fh:
                fh.write(self.intro)
        else:
            logging.debug(f"Loading conversation from '{filepath}'")
            with open(filepath, encoding="utf-8") as fh:
                file_contents = fh.readlines()
                if file_contents:
                    self.intro = file_contents[0]  # First line is intro
                    self.chat_history = "\n".join(file_contents[1:])

    def __trim_chat_history(self, chat_history: str, intro: str) -> str:
        """Keep the chat history fresh by trimming it when it gets too long!

        This method makes sure we don't exceed our token limits by:
        - Calculating total length (intro + history)
        - Trimming older messages if needed
        - Keeping the convo smooth and within limits

        Args:
            chat_history (str): The current chat history to trim
            intro (str): The conversation's intro/system prompt

        Returns:
            str: The trimmed chat history, ready to use!

        Examples:
            >>> chat = Conversation(max_tokens=500)
            >>> trimmed = chat._Conversation__trim_chat_history("Hello! Hi!", "Intro")
        """
        len_of_intro = len(intro)
        len_of_chat_history = len(chat_history)
        total = self.max_tokens_to_sample + len_of_intro + len_of_chat_history

        if total > self.history_offset:
            truncate_at = (total - self.history_offset) + self.prompt_allowance
            trimmed_chat_history = chat_history[truncate_at:]
            return "... " + trimmed_chat_history
        return chat_history

    def gen_complete_prompt(self, prompt: str, intro: Optional[str] = None) -> str:
        """Generate a complete prompt that's ready to go!

        This method:
        - Combines the intro, history, and new prompt
        - Trims history if needed
        - Keeps everything organized and flowing

        Args:
            prompt (str): Your message to add to the chat
            intro (str, optional): Custom intro to use. Default: None (uses class intro)

        Returns:
            str: The complete conversation prompt, ready for the LLM!

        Examples:
            >>> chat = Conversation()
            >>> prompt = chat.gen_complete_prompt("What's good?")
        """
        if not self.status:
            return prompt

        intro = intro or self.intro or (
            "You're a Large Language Model for chatting with people. "
            "Assume role of the LLM and give your response."
        )

        incomplete_chat_history = self.chat_history + self.history_format % {
            "user": prompt,
            "llm": ""
        }
        return intro + self.__trim_chat_history(incomplete_chat_history, intro)

    def update_chat_history(
        self, prompt: str, response: str, force: bool = False
    ) -> None:
        """Keep the conversation flowing by updating the chat history!

        This method:
        - Adds new messages to the history
        - Updates the file if needed
        - Keeps everything organized

        Args:
            prompt (str): Your message to add
            response (str): The LLM's response
            force (bool): Force update even if history is off. Default: False

        Examples:
            >>> chat = Conversation()
            >>> chat.update_chat_history("Hi!", "Hello there!")
        """
        if not self.status and not force:
            return

        new_history = self.history_format % {"user": prompt, "llm": response}

        if self.file and self.update_file:
            # Create file if it doesn't exist
            if not os.path.exists(self.file):
                with open(self.file, "w", encoding="utf-8") as fh:
                    fh.write(self.intro + "\n")

            # Append new history
            with open(self.file, "a", encoding="utf-8") as fh:
                fh.write(new_history)

        self.chat_history += new_history

    def add_message(self, role: str, content: str) -> None:
        """Add a new message to the chat - simple and clean!

        This method:
        - Validates the message role
        - Adds the message to history
        - Updates file if needed

        Args:
            role (str): Who's sending? ('user', 'llm', 'tool', or 'reasoning')
            content (str): What's the message?

        Examples:
            >>> chat = Conversation()
            >>> chat.add_message("user", "Hey there!")
            >>> chat.add_message("llm", "Hi! How can I help?")
        """
        role_formats = {
            "user": "User",
            "llm": "LLM",
            "tool": "Tool",
            "reasoning": "Reasoning"
        }

        if role in role_formats:
            self.chat_history += f"\n{role_formats[role]} : {content}"
        else:
            logger.warning(f"Unknown role '{role}' for message: {content}")
```
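Taken together, the new module gives providers a single place to build prompts from history. A minimal usage sketch follows; the `echo_llm` helper is a hypothetical stand-in for a real provider call, and only the `Conversation` methods shown in the diff above are assumed.

```python
# Sketch: drive the new Conversation history manager.
# Assumes the module is importable as webscout.conversation; echo_llm()
# is a hypothetical stand-in for a real provider call.
from webscout.conversation import Conversation

def echo_llm(prompt: str) -> str:
    """Illustrative provider call used only for this sketch."""
    return f"(echo) {prompt}"

chat = Conversation(max_tokens=500, filepath="chat_history.txt")

user_msg = "Hello!"
full_prompt = chat.gen_complete_prompt(user_msg)   # intro + trimmed history + new turn
reply = echo_llm(full_prompt)
chat.update_chat_history(user_msg, reply)          # appends to history and the file

# Roles beyond user/llm are also accepted by add_message():
chat.add_message("tool", "search results ...")
```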
webscout/exceptions.py
CHANGED
@@ -1,29 +1,161 @@

```python
class WebscoutE(Exception):
    """Base exception class for search."""


class APIConnectionError(Exception):
    """Raised when there are connection issues with an API."""
    pass


class RatelimitE(Exception):
    """Raised for rate limit exceeded errors during API requests."""


class ConversationLimitException(Exception):
    """Raised for conversation limit exceeded errors during API requests."""
    pass


class TimeoutE(Exception):
    """Raised for timeout errors during API requests."""


class FailedToGenerateResponseError(Exception):
    """Provider failed to fetch response"""


class AllProvidersFailure(Exception):
    """None of the providers generated response successfully"""
    pass


class FacebookInvalidCredentialsException(Exception):
    pass


class FacebookRegionBlocked(Exception):
    pass


class ModelUnloadedException(Exception):
    pass


class TranscriptRetrievalError(WebscoutE):
    """Base class for transcript retrieval errors."""

    def __init__(self, video_id, message):
        super().__init__(message.format(video_url=WATCH_URL.format(video_id=video_id)))
        self.video_id = video_id


class YouTubeRequestFailedError(TranscriptRetrievalError):
    """Raised when a request to YouTube fails."""

    def __init__(self, video_id, http_error):
        message = 'Request to YouTube failed: {reason}'
        super().__init__(video_id, message.format(reason=str(http_error)))


class VideoUnavailableError(TranscriptRetrievalError):
    """Raised when the video is unavailable."""

    def __init__(self, video_id):
        message = 'The video is no longer available'
        super().__init__(video_id, message)


class InvalidVideoIdError(TranscriptRetrievalError):
    """Raised when an invalid video ID is provided."""

    def __init__(self, video_id):
        message = (
            'You provided an invalid video id. Make sure you are using the video id and NOT the url!\n\n'
            'Do NOT run: `YTTranscriber.get_transcript("https://www.youtube.com/watch?v=1234")`\n'
            'Instead run: `YTTranscriber.get_transcript("1234")`'
        )
        super().__init__(video_id, message)


class TooManyRequestsError(TranscriptRetrievalError):
    """Raised when YouTube rate limits the requests."""

    def __init__(self, video_id):
        message = (
            'YouTube is receiving too many requests from this IP and now requires solving a captcha to continue. '
            'One of the following things can be done to work around this:\n\
- Manually solve the captcha in a browser and export the cookie. '
            '- Use a different IP address\n\
- Wait until the ban on your IP has been lifted'
        )
        super().__init__(video_id, message)


class TranscriptsDisabledError(TranscriptRetrievalError):
    """Raised when transcripts are disabled for the video."""

    def __init__(self, video_id):
        message = 'Subtitles are disabled for this video'
        super().__init__(video_id, message)


class NoTranscriptAvailableError(TranscriptRetrievalError):
    """Raised when no transcripts are available for the video."""

    def __init__(self, video_id):
        message = 'No transcripts are available for this video'
        super().__init__(video_id, message)


class NotTranslatableError(TranscriptRetrievalError):
    """Raised when the transcript is not translatable."""

    def __init__(self, video_id):
        message = 'The requested language is not translatable'
        super().__init__(video_id, message)


class TranslationLanguageNotAvailableError(TranscriptRetrievalError):
    """Raised when the requested translation language is not available."""

    def __init__(self, video_id):
        message = 'The requested translation language is not available'
        super().__init__(video_id, message)


class CookiePathInvalidError(TranscriptRetrievalError):
    """Raised when the cookie path is invalid."""

    def __init__(self, video_id):
        message = 'The provided cookie path is invalid'
        super().__init__(video_id, message)


class CookiesInvalidError(TranscriptRetrievalError):
    """Raised when the provided cookies are invalid."""

    def __init__(self, video_id):
        message = 'The cookies provided are not valid (may have expired)'
        super().__init__(video_id, message)


class FailedToCreateConsentCookieError(TranscriptRetrievalError):
    """Raised when consent cookie creation fails."""

    def __init__(self, video_id):
        message = 'Failed to automatically give consent to saving cookies'
        super().__init__(video_id, message)


class NoTranscriptFoundError(TranscriptRetrievalError):
    """Raised when no transcript is found for the requested language codes."""

    def __init__(self, video_id, requested_language_codes, transcript_data):
        message = (
            'No transcripts were found for any of the requested language codes: {requested_language_codes}\n\n'
            '{transcript_data}'
        )
        super().__init__(video_id, message.format(
            requested_language_codes=requested_language_codes,
            transcript_data=str(transcript_data)
        ))
```
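For callers, the practical effect of this rewrite is a richer set of exception types to branch on. A minimal handling sketch follows, assuming only the classes defined above; the `ask_provider` helper is hypothetical and stands in for a real provider call.

```python
# Sketch: caller-side handling of the provider/search exceptions above.
# ask_provider() is an illustrative helper, not part of the package.
from webscout.exceptions import (
    WebscoutE,
    APIConnectionError,
    RatelimitE,
    FailedToGenerateResponseError,
)

def ask_provider(prompt: str) -> str:
    # Stand-in for a real provider call that may fail.
    raise RatelimitE("429 Too Many Requests")

try:
    answer = ask_provider("hello")
except RatelimitE:
    print("Rate limited; back off and retry.")
except (APIConnectionError, FailedToGenerateResponseError) as exc:
    print(f"Provider error: {exc}")
except WebscoutE as exc:  # base class for the search-related errors
    print(f"Search error: {exc}")
```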
webscout/litagent/__init__.py
ADDED
@@ -0,0 +1,172 @@

```python
"""
LitAgent - A lit user agent generator with infinite possibilities! 🔥

Examples:
    >>> from webscout import LitAgent
    >>> agent = LitAgent()
    >>>
    >>> # Get random user agents
    >>> agent.random()   # Random agent from any browser
    >>> agent.mobile()   # Random mobile device agent
    >>> agent.desktop()  # Random desktop agent
    >>>
    >>> # Browser specific agents
    >>> agent.chrome()   # Latest Chrome browser
    >>> agent.firefox()  # Latest Firefox browser
    >>> agent.safari()   # Latest Safari browser
    >>> agent.edge()     # Latest Edge browser
    >>> agent.opera()    # Latest Opera browser
    >>>
    >>> # Refresh your agents
    >>> agent.refresh()  # Get fresh new agents
"""

import random
from typing import Optional, List, Dict
from ..Litlogger import LitLogger, LogFormat, ColorScheme

logger = LitLogger(
    name="LitAgent",
    format=LogFormat.MODERN_EMOJI,
    color_scheme=ColorScheme.CYBERPUNK
)

class LitAgent:
    """A lit user agent generator that keeps it fresh! 🌟"""

    # Browser versions we support
    BROWSERS = {
        "chrome": (48, 120),
        "firefox": (48, 121),
        "safari": (605, 617),
        "edge": (79, 120),
        "opera": (48, 104)
    }

    # OS versions
    OS_VERSIONS = {
        "windows": ["10.0", "11.0"],
        "mac": ["10_15_7", "11_0", "12_0", "13_0", "14_0"],
        "linux": ["x86_64", "i686"],
        "android": ["10", "11", "12", "13", "14"],
        "ios": ["14_0", "15_0", "16_0", "17_0"]
    }

    # Device types
    DEVICES = {
        "mobile": [
            "iPhone", "iPad", "Samsung Galaxy", "Google Pixel",
            "OnePlus", "Xiaomi", "Huawei", "OPPO", "Vivo"
        ],
        "desktop": ["Windows PC", "MacBook", "iMac", "Linux Desktop"],
        "tablet": ["iPad", "Samsung Galaxy Tab", "Microsoft Surface"],
        "console": ["PlayStation 5", "Xbox Series X", "Nintendo Switch"],
        "tv": ["Samsung Smart TV", "LG WebOS", "Android TV", "Apple TV"]
    }

    def __init__(self):
        """Initialize LitAgent with style! 💫"""
        self.agents = self._generate_agents(100)  # Keep 100 agents in memory

    def _generate_agents(self, count: int) -> List[str]:
        """Generate some lit user agents! 🛠️"""
        agents = []
        for _ in range(count):
            browser = random.choice(list(self.BROWSERS.keys()))
            version = random.randint(*self.BROWSERS[browser])

            if browser in ['chrome', 'firefox', 'edge', 'opera']:
                os_type = random.choice(['windows', 'mac', 'linux'])
                os_ver = random.choice(self.OS_VERSIONS[os_type])

                if os_type == 'windows':
                    platform = f"Windows NT {os_ver}"
                elif os_type == 'mac':
                    platform = f"Macintosh; Intel Mac OS X {os_ver}"
                else:
                    platform = f"X11; Linux {os_ver}"

                agent = f"Mozilla/5.0 ({platform}) AppleWebKit/537.36 (KHTML, like Gecko) "
                if browser == 'chrome':
                    agent += f"Chrome/{version}.0.0.0 Safari/537.36"
                elif browser == 'firefox':
                    agent += f"Firefox/{version}.0"
                elif browser == 'edge':
                    agent += f"Edge/{version}.0.0.0"
                elif browser == 'opera':
                    agent += f"OPR/{version}.0.0.0"

            elif browser == 'safari':
                device = random.choice(['mac', 'ios'])
                if device == 'mac':
                    ver = random.choice(self.OS_VERSIONS['mac'])
                    agent = f"Mozilla/5.0 (Macintosh; Intel Mac OS X {ver}) "
                else:
                    ver = random.choice(self.OS_VERSIONS['ios'])
                    device = random.choice(['iPhone', 'iPad'])
                    agent = f"Mozilla/5.0 ({device}; CPU OS {ver} like Mac OS X) "
                agent += f"AppleWebKit/{version}.1.15 (KHTML, like Gecko) Version/{version//100}.0 Safari/{version}.1.15"

            agents.append(agent)

        return list(set(agents))  # Remove any duplicates

    def random(self) -> str:
        """Get a random user agent! 🎲"""
        return random.choice(self.agents)

    def browser(self, name: str) -> str:
        """Get a browser-specific agent! 🌐"""
        name = name.lower()
        if name not in self.BROWSERS:
            logger.warning(f"Unknown browser: {name} - Using random browser")
            return self.random()

        agents = [a for a in self.agents if name in a.lower()]
        return random.choice(agents) if agents else self.random()

    def mobile(self) -> str:
        """Get a mobile device agent! 📱"""
        agents = [a for a in self.agents if any(d in a for d in self.DEVICES['mobile'])]
        return random.choice(agents) if agents else self.random()

    def desktop(self) -> str:
        """Get a desktop agent! 💻"""
        agents = [a for a in self.agents if 'Windows' in a or 'Macintosh' in a or 'Linux' in a]
        return random.choice(agents) if agents else self.random()

    def chrome(self) -> str:
        """Get a Chrome agent! 🌐"""
        return self.browser('chrome')

    def firefox(self) -> str:
        """Get a Firefox agent! 🦊"""
        return self.browser('firefox')

    def safari(self) -> str:
        """Get a Safari agent! 🧭"""
        return self.browser('safari')

    def edge(self) -> str:
        """Get an Edge agent! 📐"""
        return self.browser('edge')

    def opera(self) -> str:
        """Get an Opera agent! 🎭"""
        return self.browser('opera')

    def refresh(self) -> None:
        """Refresh the agents with new ones! 🔄"""
        self.agents = self._generate_agents(100)

agent = LitAgent()

if __name__ == "__main__":
    # Test it out! 🧪
    print("Random:", agent.random())
    print("Chrome:", agent.chrome())
    print("Firefox:", agent.firefox())
    print("Safari:", agent.safari())
    print("Mobile:", agent.mobile())
    print("Desktop:", agent.desktop())
```