webscout 6.3-py3-none-any.whl → 6.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (131)
  1. webscout/AIauto.py +191 -176
  2. webscout/AIbase.py +0 -197
  3. webscout/AIutel.py +441 -1130
  4. webscout/DWEBS.py +189 -35
  5. webscout/{YTdownloader.py → Extra/YTToolkit/YTdownloader.py} +990 -1103
  6. webscout/Extra/YTToolkit/__init__.py +3 -0
  7. webscout/{transcriber.py → Extra/YTToolkit/transcriber.py} +479 -551
  8. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
  9. webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
  10. webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
  11. webscout/Extra/YTToolkit/ytapi/extras.py +45 -0
  12. webscout/Extra/YTToolkit/ytapi/https.py +88 -0
  13. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
  14. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
  15. webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
  16. webscout/Extra/YTToolkit/ytapi/query.py +37 -0
  17. webscout/Extra/YTToolkit/ytapi/stream.py +60 -0
  18. webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
  19. webscout/Extra/YTToolkit/ytapi/video.py +102 -0
  20. webscout/Extra/__init__.py +3 -1
  21. webscout/Extra/autocoder/__init__.py +9 -0
  22. webscout/Extra/autocoder/autocoder_utiles.py +121 -0
  23. webscout/Extra/autocoder/rawdog.py +680 -0
  24. webscout/Extra/autollama.py +246 -195
  25. webscout/Extra/gguf.py +81 -56
  26. webscout/Extra/markdownlite/__init__.py +862 -0
  27. webscout/Extra/weather_ascii.py +2 -2
  28. webscout/LLM.py +206 -43
  29. webscout/Litlogger/__init__.py +681 -0
  30. webscout/Provider/DARKAI.py +1 -1
  31. webscout/Provider/EDITEE.py +1 -1
  32. webscout/Provider/NinjaChat.py +1 -1
  33. webscout/Provider/PI.py +120 -35
  34. webscout/Provider/Perplexity.py +590 -598
  35. webscout/Provider/Reka.py +0 -1
  36. webscout/Provider/RoboCoders.py +206 -0
  37. webscout/Provider/TTI/AiForce/__init__.py +22 -0
  38. webscout/Provider/TTI/AiForce/async_aiforce.py +257 -0
  39. webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -0
  40. webscout/Provider/TTI/Nexra/__init__.py +22 -0
  41. webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
  42. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
  43. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
  44. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -0
  45. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -0
  46. webscout/Provider/TTI/__init__.py +2 -4
  47. webscout/Provider/TTI/artbit/__init__.py +22 -0
  48. webscout/Provider/TTI/artbit/async_artbit.py +184 -0
  49. webscout/Provider/TTI/artbit/sync_artbit.py +176 -0
  50. webscout/Provider/TTI/blackbox/__init__.py +4 -0
  51. webscout/Provider/TTI/blackbox/async_blackbox.py +212 -0
  52. webscout/Provider/TTI/{blackboximage.py → blackbox/sync_blackbox.py} +199 -153
  53. webscout/Provider/TTI/deepinfra/__init__.py +4 -0
  54. webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -0
  55. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -0
  56. webscout/Provider/TTI/huggingface/__init__.py +22 -0
  57. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
  58. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
  59. webscout/Provider/TTI/imgninza/__init__.py +4 -0
  60. webscout/Provider/TTI/imgninza/async_ninza.py +214 -0
  61. webscout/Provider/TTI/{imgninza.py → imgninza/sync_ninza.py} +209 -136
  62. webscout/Provider/TTI/talkai/__init__.py +4 -0
  63. webscout/Provider/TTI/talkai/async_talkai.py +229 -0
  64. webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
  65. webscout/Provider/TTS/__init__.py +5 -1
  66. webscout/Provider/TTS/deepgram.py +183 -0
  67. webscout/Provider/TTS/elevenlabs.py +137 -0
  68. webscout/Provider/TTS/gesserit.py +151 -0
  69. webscout/Provider/TTS/murfai.py +139 -0
  70. webscout/Provider/TTS/parler.py +134 -107
  71. webscout/Provider/TTS/streamElements.py +360 -275
  72. webscout/Provider/TTS/utils.py +280 -0
  73. webscout/Provider/TTS/voicepod.py +116 -116
  74. webscout/Provider/__init__.py +8 -1
  75. webscout/Provider/askmyai.py +2 -2
  76. webscout/Provider/cerebras.py +227 -219
  77. webscout/Provider/llama3mitril.py +0 -1
  78. webscout/Provider/meta.py +794 -779
  79. webscout/Provider/mhystical.py +176 -0
  80. webscout/Provider/perplexitylabs.py +265 -0
  81. webscout/Provider/twitterclone.py +251 -245
  82. webscout/Provider/typegpt.py +358 -0
  83. webscout/__init__.py +9 -8
  84. webscout/__main__.py +5 -5
  85. webscout/cli.py +252 -280
  86. webscout/conversation.py +227 -0
  87. webscout/exceptions.py +161 -29
  88. webscout/litagent/__init__.py +172 -0
  89. webscout/litprinter/__init__.py +832 -0
  90. webscout/optimizers.py +270 -0
  91. webscout/prompt_manager.py +279 -0
  92. webscout/scout/__init__.py +11 -0
  93. webscout/scout/core.py +884 -0
  94. webscout/scout/element.py +459 -0
  95. webscout/scout/parsers/__init__.py +69 -0
  96. webscout/scout/parsers/html5lib_parser.py +172 -0
  97. webscout/scout/parsers/html_parser.py +236 -0
  98. webscout/scout/parsers/lxml_parser.py +178 -0
  99. webscout/scout/utils.py +38 -0
  100. webscout/swiftcli/__init__.py +810 -0
  101. webscout/update_checker.py +125 -0
  102. webscout/version.py +1 -1
  103. webscout/zeroart/__init__.py +55 -0
  104. webscout/zeroart/base.py +61 -0
  105. webscout/zeroart/effects.py +99 -0
  106. webscout/zeroart/fonts.py +816 -0
  107. webscout/zerodir/__init__.py +225 -0
  108. {webscout-6.3.dist-info → webscout-6.5.dist-info}/METADATA +37 -112
  109. webscout-6.5.dist-info/RECORD +179 -0
  110. webscout/Agents/Onlinesearcher.py +0 -182
  111. webscout/Agents/__init__.py +0 -2
  112. webscout/Agents/functioncall.py +0 -248
  113. webscout/Bing_search.py +0 -154
  114. webscout/Provider/TTI/AIuncensoredimage.py +0 -103
  115. webscout/Provider/TTI/Nexra.py +0 -120
  116. webscout/Provider/TTI/PollinationsAI.py +0 -138
  117. webscout/Provider/TTI/WebSimAI.py +0 -142
  118. webscout/Provider/TTI/aiforce.py +0 -160
  119. webscout/Provider/TTI/artbit.py +0 -141
  120. webscout/Provider/TTI/deepinfra.py +0 -148
  121. webscout/Provider/TTI/huggingface.py +0 -155
  122. webscout/Provider/TTI/talkai.py +0 -116
  123. webscout/g4f.py +0 -666
  124. webscout/models.py +0 -23
  125. webscout/requestsHTMLfix.py +0 -775
  126. webscout/webai.py +0 -2590
  127. webscout-6.3.dist-info/RECORD +0 -124
  128. {webscout-6.3.dist-info → webscout-6.5.dist-info}/LICENSE.md +0 -0
  129. {webscout-6.3.dist-info → webscout-6.5.dist-info}/WHEEL +0 -0
  130. {webscout-6.3.dist-info → webscout-6.5.dist-info}/entry_points.txt +0 -0
  131. {webscout-6.3.dist-info → webscout-6.5.dist-info}/top_level.txt +0 -0
webscout/Extra/weather_ascii.py CHANGED
@@ -1,6 +1,6 @@
 import requests
 from rich.console import Console
-from pyfiglet import figlet_format
+from webscout.zeroart import figlet_format
 
 console = Console()
 def get(location):
@@ -12,7 +12,7 @@ def get(location):
         str: ASCII art weather report if the request is successful,
         otherwise an error message.
     """
-    console.print(f"[bold green]{figlet_format('Weather')}[/]\n", justify="center")
+    console.print(f"[bold green]{figlet_format('Weather')}")
     url = f"https://wttr.in/{location}"
     response = requests.get(url, headers={'User-Agent': 'curl'})
 
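The call site above is unchanged apart from the import, so this swap relies on webscout.zeroart exposing a pyfiglet-compatible figlet_format(text) -> str; note the new line also drops rich's closing [/] tag and the justify="center" argument. A minimal sketch of the resulting usage, assuming that compatible signature:

    # Minimal sketch, assuming webscout.zeroart.figlet_format mirrors
    # pyfiglet's figlet_format(text) -> str signature.
    from rich.console import Console
    from webscout.zeroart import figlet_format

    console = Console()
    console.print(f"[bold green]{figlet_format('Weather')}")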
webscout/LLM.py CHANGED
@@ -1,14 +1,53 @@
+"""
+>>> from webscout.LLM import LLM, VLM
+>>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
+>>> response = llm.chat([{"role": "user", "content": "What's good?"}])
+>>> print(response)
+'Hey! I'm doing great, thanks for asking! How can I help you today? 😊'
+
+>>> # For vision tasks
+>>> vlm = VLM("cogvlm-grounding-generalist")
+>>> response = vlm.chat([{"role": "user", "content": [{"type": "image", "image_url": "path/to/image.jpg"}, {"type": "text", "text": "What's in this image?"}]}])
+"""
+
 import requests
 import base64
 import json
 from typing import List, Dict, Union, Generator, Optional, Any
 
 class LLMError(Exception):
-    """Custom exception for LLM API errors"""
+    """Custom exception for LLM API errors 🚫
+
+    Examples:
+        >>> try:
+        ...     raise LLMError("API key not found!")
+        ... except LLMError as e:
+        ...     print(f"Error: {e}")
+        Error: API key not found!
+    """
     pass
 
 class LLM:
-    """A class for interacting with the DeepInfra LLM API."""
+    """A class for chatting with DeepInfra's powerful language models! 🚀
+
+    This class lets you:
+    - Chat with state-of-the-art language models 💬
+    - Stream responses in real-time ⚡
+    - Control temperature and token limits 🎮
+    - Handle system messages and chat history 📝
+
+    Examples:
+        >>> from webscout.LLM import LLM
+        >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
+        >>> response = llm.chat([
+        ...     {"role": "user", "content": "Write a short poem!"}
+        ... ])
+        >>> print(response)
+        'Through starlit skies and morning dew,
+        Nature's beauty, forever new.
+        In every moment, magic gleams,
+        Life's poetry flows like gentle streams.'
+    """
 
     def __init__(self, model: str, system_message: str = "You are a Helpful AI."):
         """
@@ -17,6 +56,11 @@ class LLM:
         Args:
             model: The model identifier (e.g., "meta-llama/Meta-Llama-3-70B-Instruct")
             system_message: The system message to use for the conversation
+
+        Examples:
+            >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
+            >>> print(llm.model)
+            'meta-llama/Meta-Llama-3-70B-Instruct'
         """
         self.model = model
         self.api_url = "https://api.deepinfra.com/v1/openai/chat/completions"
@@ -48,7 +92,26 @@ class LLM:
         max_tokens: int = 8028,
         stop: Optional[List[str]] = None,
     ) -> Dict[str, Any]:
-        """Prepare the API request payload."""
+        """Prepare the chat payload with all the right settings! 🎯
+
+        Args:
+            messages: Your chat messages (role & content)
+            stream: Want real-time responses? Set True! ⚡
+            temperature: Creativity level (0-1) 🎨
+            max_tokens: Max words to generate 📝
+            stop: Words to stop at (optional) 🛑
+
+        Returns:
+            Dict with all the API settings ready to go! 🚀
+
+        Examples:
+            >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
+            >>> payload = llm._prepare_payload([
+            ...     {"role": "user", "content": "Hi!"}
+            ... ])
+            >>> print(payload['model'])
+            'meta-llama/Meta-Llama-3-70B-Instruct'
+        """
         return {
             'model': self.model,
             'messages': messages,
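The hunk cuts off after 'messages', so the rest of the returned dict is not shown; given the signature, the full payload presumably carries the remaining knobs too. A sketch under that assumption, not the verbatim source:

    from typing import Any, Dict, List, Optional

    def prepare_payload(model: str,
                        messages: List[Dict[str, str]],
                        stream: bool = False,
                        temperature: float = 0.7,
                        max_tokens: int = 8028,
                        stop: Optional[List[str]] = None) -> Dict[str, Any]:
        # Presumed full payload, inferred from _prepare_payload's signature;
        # the diff truncates the original after 'messages'.
        return {
            'model': model,
            'messages': messages,
            'stream': stream,
            'temperature': temperature,
            'max_tokens': max_tokens,
            'stop': stop,
        }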
@@ -66,21 +129,38 @@
         max_tokens: int = 8028,
         stop: Optional[List[str]] = None,
     ) -> Union[str, Generator[str, None, None]]:
-        """
-        Send a chat request to the DeepInfra API.
-
+        """Start chatting with the AI! 💬
+
+        This method is your gateway to:
+        - Having awesome conversations 🗣️
+        - Getting creative responses 🎨
+        - Streaming real-time replies ⚡
+        - Controlling the output style 🎮
+
         Args:
-            messages: List of message dictionaries with 'role' and 'content'
-            stream: Whether to stream the response
-            temperature: Sampling temperature (0-1)
-            max_tokens: Maximum tokens to generate
-            stop: Optional list of stop sequences
-
+            messages: Your chat messages (role & content)
+            stream: Want real-time responses? Set True!
+            temperature: Creativity level (0-1)
+            max_tokens: Max words to generate
+            stop: Words to stop at (optional)
+
         Returns:
-            Either a string response or a generator for streaming
-
+            Either a complete response or streaming generator
+
         Raises:
-            LLMError: If the API request fails
+            LLMError: If something goes wrong 🚫
+
+        Examples:
+            >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
+            >>> # Regular chat
+            >>> response = llm.chat([
+            ...     {"role": "user", "content": "Tell me a joke!"}
+            ... ])
+            >>> # Streaming chat
+            >>> for chunk in llm.chat([
+            ...     {"role": "user", "content": "Tell me a story!"}
+            ... ], stream=True):
+            ...     print(chunk, end='')
         """
         payload = self._prepare_payload(messages, stream, temperature, max_tokens, stop)
 
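Because chat() returns a plain string or a generator depending on stream, callers need to branch; a usage sketch assembled from the docstring examples above:

    from webscout.LLM import LLM

    llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")

    # Non-streaming: a complete string comes back.
    reply = llm.chat([{"role": "user", "content": "Tell me a joke!"}])
    print(reply)

    # Streaming: iterate the generator as chunks arrive.
    for chunk in llm.chat([{"role": "user", "content": "Tell me a story!"}],
                          stream=True):
        print(chunk, end="", flush=True)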
@@ -93,7 +173,24 @@
             raise LLMError(f"API request failed: {str(e)}")
 
     def _stream_response(self, payload: Dict[str, Any]) -> Generator[str, None, None]:
-        """Stream the chat response."""
+        """Stream the chat response in real-time! ⚡
+
+        Args:
+            payload: The prepared chat payload
+
+        Yields:
+            Streaming chunks of the response
+
+        Raises:
+            LLMError: If the stream request fails 🚫
+
+        Examples:
+            >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
+            >>> for chunk in llm._stream_response(llm._prepare_payload([
+            ...     {"role": "user", "content": "Tell me a story!"}
+            ... ])):
+            ...     print(chunk, end='')
+        """
         try:
             with requests.post(self.api_url, json=payload, headers=self.headers, stream=True) as response:
                 response.raise_for_status()
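The hunk ends before the parse loop. Against an OpenAI-compatible endpoint like the api_url above, the stream is typically server-sent events with 'data:' framing; a hedged sketch of such a consumer, not necessarily webscout's exact loop:

    import json
    import requests

    def stream_chunks(api_url, payload, headers):
        # Typical consumer for an OpenAI-compatible streaming endpoint:
        # each line looks like 'data: {json}', terminated by 'data: [DONE]'.
        with requests.post(api_url, json=payload, headers=headers, stream=True) as response:
            response.raise_for_status()
            for line in response.iter_lines(decode_unicode=True):
                if not line or not line.startswith("data: "):
                    continue
                data = line[len("data: "):]
                if data == "[DONE]":
                    break
                delta = json.loads(data)["choices"][0].get("delta", {})
                if "content" in delta:
                    yield delta["content"]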
@@ -112,7 +209,24 @@
             raise LLMError(f"Stream request failed: {str(e)}")
 
     def _send_request(self, payload: Dict[str, Any]) -> str:
-        """Send a non-streaming chat request."""
+        """Send a non-streaming chat request.
+
+        Args:
+            payload: The prepared chat payload
+
+        Returns:
+            The complete response
+
+        Raises:
+            LLMError: If the request fails 🚫
+
+        Examples:
+            >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
+            >>> response = llm._send_request(llm._prepare_payload([
+            ...     {"role": "user", "content": "Tell me a joke!"}
+            ... ]))
+            >>> print(response)
+        """
         try:
             response = requests.post(self.api_url, json=payload, headers=self.headers)
             response.raise_for_status()
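The non-streaming path is likewise truncated after raise_for_status(); assuming the standard OpenAI-compatible response shape, the remainder is presumably a straightforward extraction:

    import requests

    def send_request(api_url, payload, headers) -> str:
        # Assumed continuation: pull the text out of the usual
        # {"choices": [{"message": {"content": ...}}]} response shape.
        response = requests.post(api_url, json=payload, headers=headers)
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"]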
@@ -127,15 +241,40 @@
 
 
 class VLM:
-    """A class for interacting with the DeepInfra VLM (Vision Language Model) API."""
-
+    """Your gateway to vision-language AI magic! 🖼️
+
+    This class lets you:
+    - Chat about images with AI 🎨
+    - Get detailed image descriptions 📝
+    - Answer questions about images 🤔
+    - Stream responses in real-time ⚡
+
+    Examples:
+        >>> from webscout.LLM import VLM
+        >>> vlm = VLM("cogvlm-grounding-generalist")
+        >>> # Chat about an image
+        >>> response = vlm.chat([{
+        ...     "role": "user",
+        ...     "content": [
+        ...         {"type": "image", "image_url": "path/to/image.jpg"},
+        ...         {"type": "text", "text": "What's in this image?"}
+        ...     ]
+        ... }])
+        >>> print(response)
+        'I see a beautiful sunset over mountains...'
+    """
+
     def __init__(self, model: str, system_message: str = "You are a Helpful AI."):
-        """
-        Initialize the VLM client.
-
+        """Get ready for some vision-language magic! 🚀
+
         Args:
-            model: The model identifier
-            system_message: The system message to use for the conversation
+            model: Your chosen vision model
+            system_message: Set the AI's personality
+
+        Examples:
+            >>> vlm = VLM("cogvlm-grounding-generalist")
+            >>> print(vlm.model)
+            'cogvlm-grounding-generalist'
         """
         self.model = model
         self.api_url = "https://api.deepinfra.com/v1/openai/chat/completions"
@@ -166,20 +305,39 @@
         temperature: float = 0.7,
         max_tokens: int = 8028,
     ) -> Union[str, Generator[str, None, None]]:
-        """
-        Send a chat request with image support to the DeepInfra API.
-
+        """Chat about images with AI! 🖼️
+
+        This method lets you:
+        - Ask questions about images 🤔
+        - Get detailed descriptions 📝
+        - Stream responses in real-time ⚡
+        - Control response creativity 🎨
+
         Args:
-            messages: List of message dictionaries that may include image data
-            stream: Whether to stream the response
-            temperature: Sampling temperature (0-1)
-            max_tokens: Maximum tokens to generate
-
+            messages: Your chat + image data
+            stream: Want real-time responses?
+            temperature: Creativity level (0-1)
+            max_tokens: Max words to generate
+
         Returns:
-            Either a string response or a generator for streaming
-
+            Either a complete response or streaming generator
+
         Raises:
-            LLMError: If the API request fails
+            LLMError: If something goes wrong 🚫
+
+        Examples:
+            >>> vlm = VLM("cogvlm-grounding-generalist")
+            >>> # Regular chat with image
+            >>> response = vlm.chat([{
+            ...     "role": "user",
+            ...     "content": [
+            ...         {"type": "image", "image_url": "sunset.jpg"},
+            ...         {"type": "text", "text": "Describe this scene"}
+            ...     ]
+            ... }])
+            >>> # Streaming chat
+            >>> for chunk in vlm.chat([...], stream=True):
+            ...     print(chunk, end='')
         """
         payload = {
             "model": self.model,
@@ -232,17 +390,22 @@
 
 
 def encode_image_to_base64(image_path: str) -> str:
-    """
-    Encode an image file to base64 string.
-
+    """Turn your image into base64 magic! 🎨
+
     Args:
-        image_path: Path to the image file
-
+        image_path: Where's your image at?
+
     Returns:
-        Base64 encoded string of the image
-
+        Your image as a base64 string ✨
+
     Raises:
-        IOError: If the image file cannot be read
+        IOError: If we can't read your image 🚫
+
+    Examples:
+        >>> from webscout.LLM import encode_image_to_base64
+        >>> image_data = encode_image_to_base64("cool_pic.jpg")
+        >>> print(len(image_data))  # Check the encoded length
+        12345
     """
     try:
         with open(image_path, "rb") as image_file:
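The diff stops inside the try block of encode_image_to_base64; a minimal sketch of the likely completion, consistent with the documented IOError behavior:

    import base64

    def encode_image_to_base64(image_path: str) -> str:
        # Likely completion of the truncated body: read the bytes,
        # base64-encode them, and return a UTF-8 string.
        try:
            with open(image_path, "rb") as image_file:
                return base64.b64encode(image_file.read()).decode("utf-8")
        except IOError as e:
            raise IOError(f"Could not read image file: {e}") from e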