webscout-6.3-py3-none-any.whl → webscout-6.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +191 -176
- webscout/AIbase.py +0 -197
- webscout/AIutel.py +441 -1130
- webscout/DWEBS.py +189 -35
- webscout/{YTdownloader.py → Extra/YTToolkit/YTdownloader.py} +990 -1103
- webscout/Extra/YTToolkit/__init__.py +3 -0
- webscout/{transcriber.py → Extra/YTToolkit/transcriber.py} +479 -551
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +45 -0
- webscout/Extra/YTToolkit/ytapi/https.py +88 -0
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
- webscout/Extra/YTToolkit/ytapi/query.py +37 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +60 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
- webscout/Extra/YTToolkit/ytapi/video.py +102 -0
- webscout/Extra/__init__.py +3 -1
- webscout/Extra/autocoder/__init__.py +9 -0
- webscout/Extra/autocoder/autocoder_utiles.py +121 -0
- webscout/Extra/autocoder/rawdog.py +680 -0
- webscout/Extra/autollama.py +246 -195
- webscout/Extra/gguf.py +81 -56
- webscout/Extra/markdownlite/__init__.py +862 -0
- webscout/Extra/weather_ascii.py +2 -2
- webscout/LLM.py +206 -43
- webscout/Litlogger/__init__.py +681 -0
- webscout/Provider/DARKAI.py +1 -1
- webscout/Provider/EDITEE.py +1 -1
- webscout/Provider/NinjaChat.py +1 -1
- webscout/Provider/PI.py +120 -35
- webscout/Provider/Perplexity.py +590 -598
- webscout/Provider/Reka.py +0 -1
- webscout/Provider/RoboCoders.py +206 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -0
- webscout/Provider/TTI/AiForce/async_aiforce.py +257 -0
- webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -0
- webscout/Provider/TTI/Nexra/__init__.py +22 -0
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -0
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -0
- webscout/Provider/TTI/__init__.py +2 -4
- webscout/Provider/TTI/artbit/__init__.py +22 -0
- webscout/Provider/TTI/artbit/async_artbit.py +184 -0
- webscout/Provider/TTI/artbit/sync_artbit.py +176 -0
- webscout/Provider/TTI/blackbox/__init__.py +4 -0
- webscout/Provider/TTI/blackbox/async_blackbox.py +212 -0
- webscout/Provider/TTI/{blackboximage.py → blackbox/sync_blackbox.py} +199 -153
- webscout/Provider/TTI/deepinfra/__init__.py +4 -0
- webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -0
- webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -0
- webscout/Provider/TTI/huggingface/__init__.py +22 -0
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
- webscout/Provider/TTI/imgninza/__init__.py +4 -0
- webscout/Provider/TTI/imgninza/async_ninza.py +214 -0
- webscout/Provider/TTI/{imgninza.py → imgninza/sync_ninza.py} +209 -136
- webscout/Provider/TTI/talkai/__init__.py +4 -0
- webscout/Provider/TTI/talkai/async_talkai.py +229 -0
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
- webscout/Provider/TTS/__init__.py +5 -1
- webscout/Provider/TTS/deepgram.py +183 -0
- webscout/Provider/TTS/elevenlabs.py +137 -0
- webscout/Provider/TTS/gesserit.py +151 -0
- webscout/Provider/TTS/murfai.py +139 -0
- webscout/Provider/TTS/parler.py +134 -107
- webscout/Provider/TTS/streamElements.py +360 -275
- webscout/Provider/TTS/utils.py +280 -0
- webscout/Provider/TTS/voicepod.py +116 -116
- webscout/Provider/__init__.py +8 -1
- webscout/Provider/askmyai.py +2 -2
- webscout/Provider/cerebras.py +227 -219
- webscout/Provider/llama3mitril.py +0 -1
- webscout/Provider/meta.py +794 -779
- webscout/Provider/mhystical.py +176 -0
- webscout/Provider/perplexitylabs.py +265 -0
- webscout/Provider/twitterclone.py +251 -245
- webscout/Provider/typegpt.py +358 -0
- webscout/__init__.py +9 -8
- webscout/__main__.py +5 -5
- webscout/cli.py +252 -280
- webscout/conversation.py +227 -0
- webscout/exceptions.py +161 -29
- webscout/litagent/__init__.py +172 -0
- webscout/litprinter/__init__.py +832 -0
- webscout/optimizers.py +270 -0
- webscout/prompt_manager.py +279 -0
- webscout/scout/__init__.py +11 -0
- webscout/scout/core.py +884 -0
- webscout/scout/element.py +459 -0
- webscout/scout/parsers/__init__.py +69 -0
- webscout/scout/parsers/html5lib_parser.py +172 -0
- webscout/scout/parsers/html_parser.py +236 -0
- webscout/scout/parsers/lxml_parser.py +178 -0
- webscout/scout/utils.py +38 -0
- webscout/swiftcli/__init__.py +810 -0
- webscout/update_checker.py +125 -0
- webscout/version.py +1 -1
- webscout/zeroart/__init__.py +55 -0
- webscout/zeroart/base.py +61 -0
- webscout/zeroart/effects.py +99 -0
- webscout/zeroart/fonts.py +816 -0
- webscout/zerodir/__init__.py +225 -0
- {webscout-6.3.dist-info → webscout-6.5.dist-info}/METADATA +37 -112
- webscout-6.5.dist-info/RECORD +179 -0
- webscout/Agents/Onlinesearcher.py +0 -182
- webscout/Agents/__init__.py +0 -2
- webscout/Agents/functioncall.py +0 -248
- webscout/Bing_search.py +0 -154
- webscout/Provider/TTI/AIuncensoredimage.py +0 -103
- webscout/Provider/TTI/Nexra.py +0 -120
- webscout/Provider/TTI/PollinationsAI.py +0 -138
- webscout/Provider/TTI/WebSimAI.py +0 -142
- webscout/Provider/TTI/aiforce.py +0 -160
- webscout/Provider/TTI/artbit.py +0 -141
- webscout/Provider/TTI/deepinfra.py +0 -148
- webscout/Provider/TTI/huggingface.py +0 -155
- webscout/Provider/TTI/talkai.py +0 -116
- webscout/g4f.py +0 -666
- webscout/models.py +0 -23
- webscout/requestsHTMLfix.py +0 -775
- webscout/webai.py +0 -2590
- webscout-6.3.dist-info/RECORD +0 -124
- {webscout-6.3.dist-info → webscout-6.5.dist-info}/LICENSE.md +0 -0
- {webscout-6.3.dist-info → webscout-6.5.dist-info}/WHEEL +0 -0
- {webscout-6.3.dist-info → webscout-6.5.dist-info}/entry_points.txt +0 -0
- {webscout-6.3.dist-info → webscout-6.5.dist-info}/top_level.txt +0 -0
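The most consequential moves in this list are the YouTube tools: `YTdownloader.py` and `transcriber.py` now live under `webscout/Extra/YTToolkit/`. A hedged migration sketch, inferred purely from the renames above (whether 6.5 also keeps re-exports at the old paths is not visible in this diff):

```python
# webscout 6.3 (old layout):
#     from webscout import YTdownloader, transcriber
#
# webscout 6.5 (new layout, per the file moves above). Python resolves
# submodule imports this way even if YTToolkit/__init__.py does not
# re-export them explicitly.
from webscout.Extra.YTToolkit import YTdownloader, transcriber
```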
webscout/Extra/weather_ascii.py
CHANGED

```diff
@@ -1,6 +1,6 @@
 import requests
 from rich.console import Console
-from
+from webscout.zeroart import figlet_format
 
 console = Console()
 def get(location):
@@ -12,7 +12,7 @@ def get(location):
         str: ASCII art weather report if the request is successful,
         otherwise an error message.
     """
-    console.print(f"[bold green]{figlet_format('Weather')}
+    console.print(f"[bold green]{figlet_format('Weather')}")
     url = f"https://wttr.in/{location}"
     response = requests.get(url, headers={'User-Agent': 'curl'})
 
```
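Per the hunks above, the figlet banner now comes from webscout's own `zeroart` package (new in this release, see `webscout/zeroart/` in the file list) rather than the previous import, which is truncated in this rendering. A minimal usage sketch, assuming webscout 6.5 is installed; the module path, function name, and return behavior are taken from the diff and file list above:

```python
# Hypothetical usage based on the diff above: get() prints a figlet
# "Weather" banner, fetches wttr.in/<location> with a curl User-Agent,
# and returns the ASCII report (or an error message) as a string.
from webscout.Extra import weather_ascii

report = weather_ascii.get("London")
print(report)
```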
webscout/LLM.py
CHANGED

```diff
@@ -1,14 +1,53 @@
+"""
+>>> from webscout.LLM import LLM, VLM
+>>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
+>>> response = llm.chat([{"role": "user", "content": "What's good?"}])
+>>> print(response)
+'Hey! I'm doing great, thanks for asking! How can I help you today? 😊'
+
+>>> # For vision tasks
+>>> vlm = VLM("cogvlm-grounding-generalist")
+>>> response = vlm.chat([{"role": "user", "content": [{"type": "image", "image_url": "path/to/image.jpg"}, {"type": "text", "text": "What's in this image?"}]}])
+"""
+
 import requests
 import base64
 import json
 from typing import List, Dict, Union, Generator, Optional, Any
 
 class LLMError(Exception):
-    """Custom exception for LLM API errors
+    """Custom exception for LLM API errors 🚫
+
+    Examples:
+        >>> try:
+        ...     raise LLMError("API key not found!")
+        ... except LLMError as e:
+        ...     print(f"Error: {e}")
+        Error: API key not found!
+    """
     pass
 
 class LLM:
-    """A class for
+    """A class for chatting with DeepInfra's powerful language models! 🚀
+
+    This class lets you:
+    - Chat with state-of-the-art language models 💬
+    - Stream responses in real-time ⚡
+    - Control temperature and token limits 🎮
+    - Handle system messages and chat history 📝
+
+    Examples:
+        >>> from webscout.LLM import LLM
+        >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
+        >>> response = llm.chat([
+        ...     {"role": "user", "content": "Write a short poem!"}
+        ... ])
+        >>> print(response)
+        'Through starlit skies and morning dew,
+        Nature's beauty, forever new.
+        In every moment, magic gleams,
+        Life's poetry flows like gentle streams.'
+    """
 
     def __init__(self, model: str, system_message: str = "You are a Helpful AI."):
         """
@@ -17,6 +56,11 @@ class LLM:
         Args:
             model: The model identifier (e.g., "meta-llama/Meta-Llama-3-70B-Instruct")
             system_message: The system message to use for the conversation
+
+        Examples:
+            >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
+            >>> print(llm.model)
+            'meta-llama/Meta-Llama-3-70B-Instruct'
         """
         self.model = model
         self.api_url = "https://api.deepinfra.com/v1/openai/chat/completions"
@@ -48,7 +92,26 @@ class LLM:
         max_tokens: int = 8028,
         stop: Optional[List[str]] = None,
     ) -> Dict[str, Any]:
-        """Prepare the
+        """Prepare the chat payload with all the right settings! 🎯
+
+        Args:
+            messages: Your chat messages (role & content)
+            stream: Want real-time responses? Set True! ⚡
+            temperature: Creativity level (0-1) 🎨
+            max_tokens: Max words to generate 📝
+            stop: Words to stop at (optional) 🛑
+
+        Returns:
+            Dict with all the API settings ready to go! 🚀
+
+        Examples:
+            >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
+            >>> payload = llm._prepare_payload([
+            ...     {"role": "user", "content": "Hi!"}
+            ... ])
+            >>> print(payload['model'])
+            'meta-llama/Meta-Llama-3-70B-Instruct'
+        """
         return {
             'model': self.model,
             'messages': messages,
@@ -66,21 +129,38 @@ class LLM:
         max_tokens: int = 8028,
         stop: Optional[List[str]] = None,
     ) -> Union[str, Generator[str, None, None]]:
-        """
-
-
+        """Start chatting with the AI! 💬
+
+        This method is your gateway to:
+        - Having awesome conversations 🗣️
+        - Getting creative responses 🎨
+        - Streaming real-time replies ⚡
+        - Controlling the output style 🎮
+
         Args:
-            messages:
-            stream:
-            temperature:
-            max_tokens:
-            stop:
-
+            messages: Your chat messages (role & content)
+            stream: Want real-time responses? Set True!
+            temperature: Creativity level (0-1)
+            max_tokens: Max words to generate
+            stop: Words to stop at (optional)
+
         Returns:
-            Either a
-
+            Either a complete response or streaming generator
+
         Raises:
-            LLMError: If
+            LLMError: If something goes wrong 🚫
+
+        Examples:
+            >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
+            >>> # Regular chat
+            >>> response = llm.chat([
+            ...     {"role": "user", "content": "Tell me a joke!"}
+            ... ])
+            >>> # Streaming chat
+            >>> for chunk in llm.chat([
+            ...     {"role": "user", "content": "Tell me a story!"}
+            ... ], stream=True):
+            ...     print(chunk, end='')
         """
         payload = self._prepare_payload(messages, stream, temperature, max_tokens, stop)
 
@@ -93,7 +173,24 @@ class LLM:
             raise LLMError(f"API request failed: {str(e)}")
 
     def _stream_response(self, payload: Dict[str, Any]) -> Generator[str, None, None]:
-        """Stream the chat response
+        """Stream the chat response in real-time! ⚡
+
+        Args:
+            payload: The prepared chat payload
+
+        Yields:
+            Streaming chunks of the response
+
+        Raises:
+            LLMError: If the stream request fails 🚫
+
+        Examples:
+            >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
+            >>> for chunk in llm._stream_response(llm._prepare_payload([
+            ...     {"role": "user", "content": "Tell me a story!"}
+            ... ])):
+            ...     print(chunk, end='')
+        """
         try:
             with requests.post(self.api_url, json=payload, headers=self.headers, stream=True) as response:
                 response.raise_for_status()
@@ -112,7 +209,24 @@ class LLM:
             raise LLMError(f"Stream request failed: {str(e)}")
 
     def _send_request(self, payload: Dict[str, Any]) -> str:
-        """Send a non-streaming chat request.
+        """Send a non-streaming chat request.
+
+        Args:
+            payload: The prepared chat payload
+
+        Returns:
+            The complete response
+
+        Raises:
+            LLMError: If the request fails 🚫
+
+        Examples:
+            >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
+            >>> response = llm._send_request(llm._prepare_payload([
+            ...     {"role": "user", "content": "Tell me a joke!"}
+            ... ]))
+            >>> print(response)
+        """
         try:
             response = requests.post(self.api_url, json=payload, headers=self.headers)
             response.raise_for_status()
@@ -127,15 +241,40 @@
 
 
 class VLM:
-    """
-
+    """Your gateway to vision-language AI magic! 🖼️
+
+    This class lets you:
+    - Chat about images with AI 🎨
+    - Get detailed image descriptions 📝
+    - Answer questions about images 🤔
+    - Stream responses in real-time ⚡
+
+    Examples:
+        >>> from webscout.LLM import VLM
+        >>> vlm = VLM("cogvlm-grounding-generalist")
+        >>> # Chat about an image
+        >>> response = vlm.chat([{
+        ...     "role": "user",
+        ...     "content": [
+        ...         {"type": "image", "image_url": "path/to/image.jpg"},
+        ...         {"type": "text", "text": "What's in this image?"}
+        ...     ]
+        ... }])
+        >>> print(response)
+        'I see a beautiful sunset over mountains...'
+    """
+
     def __init__(self, model: str, system_message: str = "You are a Helpful AI."):
-        """
-
-
+        """Get ready for some vision-language magic! 🚀
+
         Args:
-            model:
-            system_message:
+            model: Your chosen vision model
+            system_message: Set the AI's personality
+
+        Examples:
+            >>> vlm = VLM("cogvlm-grounding-generalist")
+            >>> print(vlm.model)
+            'cogvlm-grounding-generalist'
         """
         self.model = model
         self.api_url = "https://api.deepinfra.com/v1/openai/chat/completions"
@@ -166,20 +305,39 @@ class VLM:
         temperature: float = 0.7,
         max_tokens: int = 8028,
     ) -> Union[str, Generator[str, None, None]]:
-        """
-
-
+        """Chat about images with AI! 🖼️
+
+        This method lets you:
+        - Ask questions about images 🤔
+        - Get detailed descriptions 📝
+        - Stream responses in real-time ⚡
+        - Control response creativity 🎨
+
         Args:
-            messages:
-            stream:
-            temperature:
-            max_tokens:
-
+            messages: Your chat + image data
+            stream: Want real-time responses?
+            temperature: Creativity level (0-1)
+            max_tokens: Max words to generate
+
         Returns:
-            Either a
-
+            Either a complete response or streaming generator
+
         Raises:
-            LLMError: If
+            LLMError: If something goes wrong 🚫
+
+        Examples:
+            >>> vlm = VLM("cogvlm-grounding-generalist")
+            >>> # Regular chat with image
+            >>> response = vlm.chat([{
+            ...     "role": "user",
+            ...     "content": [
+            ...         {"type": "image", "image_url": "sunset.jpg"},
+            ...         {"type": "text", "text": "Describe this scene"}
+            ...     ]
+            ... }])
+            >>> # Streaming chat
+            >>> for chunk in vlm.chat([...], stream=True):
+            ...     print(chunk, end='')
         """
         payload = {
             "model": self.model,
@@ -232,17 +390,22 @@ class VLM:
 
 
 def encode_image_to_base64(image_path: str) -> str:
-    """
-
-
+    """Turn your image into base64 magic! 🎨
+
     Args:
-        image_path:
-
+        image_path: Where's your image at?
+
     Returns:
-
-
+        Your image as a base64 string ✨
+
     Raises:
-        IOError: If
+        IOError: If we can't read your image 🚫
+
+    Examples:
+        >>> from webscout.LLM import encode_image_to_base64
+        >>> image_data = encode_image_to_base64("cool_pic.jpg")
+        >>> print(len(image_data))  # Check the encoded length
+        12345
     """
     try:
         with open(image_path, "rb") as image_file:
```