webscout 7.1-py3-none-any.whl → 7.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (144)
  1. webscout/AIauto.py +191 -191
  2. webscout/AIbase.py +122 -122
  3. webscout/AIutel.py +440 -440
  4. webscout/Bard.py +343 -161
  5. webscout/DWEBS.py +489 -492
  6. webscout/Extra/YTToolkit/YTdownloader.py +995 -995
  7. webscout/Extra/YTToolkit/__init__.py +2 -2
  8. webscout/Extra/YTToolkit/transcriber.py +476 -479
  9. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  10. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  11. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  12. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  13. webscout/Extra/YTToolkit/ytapi/video.py +103 -103
  14. webscout/Extra/autocoder/__init__.py +9 -9
  15. webscout/Extra/autocoder/autocoder_utiles.py +199 -199
  16. webscout/Extra/autocoder/rawdog.py +5 -7
  17. webscout/Extra/autollama.py +230 -230
  18. webscout/Extra/gguf.py +3 -3
  19. webscout/Extra/weather.py +171 -171
  20. webscout/LLM.py +442 -442
  21. webscout/Litlogger/__init__.py +67 -681
  22. webscout/Litlogger/core/__init__.py +6 -0
  23. webscout/Litlogger/core/level.py +20 -0
  24. webscout/Litlogger/core/logger.py +123 -0
  25. webscout/Litlogger/handlers/__init__.py +12 -0
  26. webscout/Litlogger/handlers/console.py +50 -0
  27. webscout/Litlogger/handlers/file.py +143 -0
  28. webscout/Litlogger/handlers/network.py +174 -0
  29. webscout/Litlogger/styles/__init__.py +7 -0
  30. webscout/Litlogger/styles/colors.py +231 -0
  31. webscout/Litlogger/styles/formats.py +377 -0
  32. webscout/Litlogger/styles/text.py +87 -0
  33. webscout/Litlogger/utils/__init__.py +6 -0
  34. webscout/Litlogger/utils/detectors.py +154 -0
  35. webscout/Litlogger/utils/formatters.py +200 -0
  36. webscout/Provider/AISEARCH/DeepFind.py +250 -250
  37. webscout/Provider/Blackboxai.py +3 -3
  38. webscout/Provider/ChatGPTGratis.py +226 -0
  39. webscout/Provider/Cloudflare.py +3 -4
  40. webscout/Provider/DeepSeek.py +218 -0
  41. webscout/Provider/Deepinfra.py +3 -3
  42. webscout/Provider/Free2GPT.py +131 -124
  43. webscout/Provider/Gemini.py +100 -115
  44. webscout/Provider/Glider.py +3 -3
  45. webscout/Provider/Groq.py +5 -1
  46. webscout/Provider/Jadve.py +3 -3
  47. webscout/Provider/Marcus.py +191 -192
  48. webscout/Provider/Netwrck.py +3 -3
  49. webscout/Provider/PI.py +2 -2
  50. webscout/Provider/PizzaGPT.py +2 -3
  51. webscout/Provider/QwenLM.py +311 -0
  52. webscout/Provider/TTI/AiForce/__init__.py +22 -22
  53. webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
  54. webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
  55. webscout/Provider/TTI/Nexra/__init__.py +22 -22
  56. webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
  57. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
  58. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
  59. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
  60. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
  61. webscout/Provider/TTI/artbit/__init__.py +22 -22
  62. webscout/Provider/TTI/artbit/async_artbit.py +184 -184
  63. webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
  64. webscout/Provider/TTI/blackbox/__init__.py +4 -4
  65. webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
  66. webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
  67. webscout/Provider/TTI/deepinfra/__init__.py +4 -4
  68. webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
  69. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
  70. webscout/Provider/TTI/huggingface/__init__.py +22 -22
  71. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
  72. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
  73. webscout/Provider/TTI/imgninza/__init__.py +4 -4
  74. webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
  75. webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
  76. webscout/Provider/TTI/talkai/__init__.py +4 -4
  77. webscout/Provider/TTI/talkai/async_talkai.py +229 -229
  78. webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
  79. webscout/Provider/TTS/deepgram.py +182 -182
  80. webscout/Provider/TTS/elevenlabs.py +136 -136
  81. webscout/Provider/TTS/gesserit.py +150 -150
  82. webscout/Provider/TTS/murfai.py +138 -138
  83. webscout/Provider/TTS/parler.py +133 -134
  84. webscout/Provider/TTS/streamElements.py +360 -360
  85. webscout/Provider/TTS/utils.py +280 -280
  86. webscout/Provider/TTS/voicepod.py +116 -116
  87. webscout/Provider/TextPollinationsAI.py +2 -3
  88. webscout/Provider/WiseCat.py +193 -0
  89. webscout/Provider/__init__.py +144 -134
  90. webscout/Provider/cerebras.py +242 -227
  91. webscout/Provider/chatglm.py +204 -204
  92. webscout/Provider/dgaf.py +2 -3
  93. webscout/Provider/gaurish.py +2 -3
  94. webscout/Provider/geminiapi.py +208 -208
  95. webscout/Provider/granite.py +223 -0
  96. webscout/Provider/hermes.py +218 -218
  97. webscout/Provider/llama3mitril.py +179 -179
  98. webscout/Provider/llamatutor.py +3 -3
  99. webscout/Provider/llmchat.py +2 -3
  100. webscout/Provider/meta.py +794 -794
  101. webscout/Provider/multichat.py +331 -331
  102. webscout/Provider/typegpt.py +359 -359
  103. webscout/Provider/yep.py +2 -2
  104. webscout/__main__.py +5 -5
  105. webscout/cli.py +319 -319
  106. webscout/conversation.py +241 -242
  107. webscout/exceptions.py +328 -328
  108. webscout/litagent/__init__.py +28 -28
  109. webscout/litagent/agent.py +2 -3
  110. webscout/litprinter/__init__.py +0 -58
  111. webscout/scout/__init__.py +8 -8
  112. webscout/scout/core.py +884 -884
  113. webscout/scout/element.py +459 -459
  114. webscout/scout/parsers/__init__.py +69 -69
  115. webscout/scout/parsers/html5lib_parser.py +172 -172
  116. webscout/scout/parsers/html_parser.py +236 -236
  117. webscout/scout/parsers/lxml_parser.py +178 -178
  118. webscout/scout/utils.py +38 -38
  119. webscout/swiftcli/__init__.py +811 -811
  120. webscout/update_checker.py +2 -12
  121. webscout/version.py +1 -1
  122. webscout/webscout_search.py +5 -4
  123. webscout/zeroart/__init__.py +54 -54
  124. webscout/zeroart/base.py +60 -60
  125. webscout/zeroart/effects.py +99 -99
  126. webscout/zeroart/fonts.py +816 -816
  127. {webscout-7.1.dist-info → webscout-7.2.dist-info}/METADATA +4 -3
  128. webscout-7.2.dist-info/RECORD +217 -0
  129. webstoken/__init__.py +30 -30
  130. webstoken/classifier.py +189 -189
  131. webstoken/keywords.py +216 -216
  132. webstoken/language.py +128 -128
  133. webstoken/ner.py +164 -164
  134. webstoken/normalizer.py +35 -35
  135. webstoken/processor.py +77 -77
  136. webstoken/sentiment.py +206 -206
  137. webstoken/stemmer.py +73 -73
  138. webstoken/tagger.py +60 -60
  139. webstoken/tokenizer.py +158 -158
  140. webscout-7.1.dist-info/RECORD +0 -198
  141. {webscout-7.1.dist-info → webscout-7.2.dist-info}/LICENSE.md +0 -0
  142. {webscout-7.1.dist-info → webscout-7.2.dist-info}/WHEEL +0 -0
  143. {webscout-7.1.dist-info → webscout-7.2.dist-info}/entry_points.txt +0 -0
  144. {webscout-7.1.dist-info → webscout-7.2.dist-info}/top_level.txt +0 -0
webscout/LLM.py CHANGED
@@ -1,442 +1,442 @@
- """
- >>> from webscout.LLM import LLM, VLM
- >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
- >>> response = llm.chat([{"role": "user", "content": "What's good?"}])
- >>> print(response)
- 'Hey! I'm doing great, thanks for asking! How can I help you today? 😊'
-
- >>> # For vision tasks
- >>> vlm = VLM("cogvlm-grounding-generalist")
- >>> response = vlm.chat([{"role": "user", "content": [{"type": "image", "image_url": "path/to/image.jpg"}, {"type": "text", "text": "What's in this image?"}]}])
- """
-
- import requests
- import base64
- import json
- from typing import List, Dict, Union, Generator, Optional, Any
-
- class LLMError(Exception):
-     """Custom exception for LLM API errors 🚫
-
-     Examples:
-         >>> try:
-         ...     raise LLMError("API key not found!")
-         ... except LLMError as e:
-         ...     print(f"Error: {e}")
-         Error: API key not found!
-     """
-     pass
-
- class LLM:
-     """A class for chatting with DeepInfra's powerful language models! 🚀
-
-     This class lets you:
-     - Chat with state-of-the-art language models 💬
-     - Stream responses in real-time ⚡
-     - Control temperature and token limits 🎮
-     - Handle system messages and chat history 📝
-
-     Examples:
-         >>> from webscout.LLM import LLM
-         >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
-         >>> response = llm.chat([
-         ...     {"role": "user", "content": "Write a short poem!"}
-         ... ])
-         >>> print(response)
-         'Through starlit skies and morning dew,
-         Nature's beauty, forever new.
-         In every moment, magic gleams,
-         Life's poetry flows like gentle streams.'
-     """
-
-     def __init__(self, model: str, system_message: str = "You are a Helpful AI."):
-         """
-         Initialize the LLM client.
-
-         Args:
-             model: The model identifier (e.g., "meta-llama/Meta-Llama-3-70B-Instruct")
-             system_message: The system message to use for the conversation
-
-         Examples:
-             >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
-             >>> print(llm.model)
-             'meta-llama/Meta-Llama-3-70B-Instruct'
-         """
-         self.model = model
-         self.api_url = "https://api.deepinfra.com/v1/openai/chat/completions"
-         self.conversation_history = [{"role": "system", "content": system_message}]
-         self.headers = {
-             'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
-             'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-             'Cache-Control': 'no-cache',
-             'Connection': 'keep-alive',
-             'Content-Type': 'application/json',
-             'Origin': 'https://deepinfra.com',
-             'Pragma': 'no-cache',
-             'Referer': 'https://deepinfra.com/',
-             'Sec-Fetch-Dest': 'empty',
-             'Sec-Fetch-Mode': 'cors',
-             'Sec-Fetch-Site': 'same-site',
-             'X-Deepinfra-Source': 'web-embed',
-             'accept': 'text/event-stream',
-             'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
-             'sec-ch-ua-mobile': '?0',
-             'sec-ch-ua-platform': '"macOS"'
-         }
-
-     def _prepare_payload(
-         self,
-         messages: List[Dict[str, str]],
-         stream: bool = False,
-         temperature: float = 0.7,
-         max_tokens: int = 8028,
-         stop: Optional[List[str]] = None,
-     ) -> Dict[str, Any]:
-         """Prepare the chat payload with all the right settings! 🎯
-
-         Args:
-             messages: Your chat messages (role & content)
-             stream: Want real-time responses? Set True! ⚡
-             temperature: Creativity level (0-1) 🎨
-             max_tokens: Max words to generate 📝
-             stop: Words to stop at (optional) 🛑
-
-         Returns:
-             Dict with all the API settings ready to go! 🚀
-
-         Examples:
-             >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
-             >>> payload = llm._prepare_payload([
-             ...     {"role": "user", "content": "Hi!"}
-             ... ])
-             >>> print(payload['model'])
-             'meta-llama/Meta-Llama-3-70B-Instruct'
-         """
-         return {
-             'model': self.model,
-             'messages': messages,
-             'temperature': temperature,
-             'max_tokens': max_tokens,
-             'stop': stop or [],
-             'stream': stream
-         }
-
-     def chat(
-         self,
-         messages: List[Dict[str, str]],
-         stream: bool = False,
-         temperature: float = 0.7,
-         max_tokens: int = 8028,
-         stop: Optional[List[str]] = None,
-     ) -> Union[str, Generator[str, None, None]]:
-         """Start chatting with the AI! 💬
-
-         This method is your gateway to:
-         - Having awesome conversations 🗣️
-         - Getting creative responses 🎨
-         - Streaming real-time replies ⚡
-         - Controlling the output style 🎮
-
-         Args:
-             messages: Your chat messages (role & content)
-             stream: Want real-time responses? Set True!
-             temperature: Creativity level (0-1)
-             max_tokens: Max words to generate
-             stop: Words to stop at (optional)
-
-         Returns:
-             Either a complete response or streaming generator
-
-         Raises:
-             LLMError: If something goes wrong 🚫
-
-         Examples:
-             >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
-             >>> # Regular chat
-             >>> response = llm.chat([
-             ...     {"role": "user", "content": "Tell me a joke!"}
-             ... ])
-             >>> # Streaming chat
-             >>> for chunk in llm.chat([
-             ...     {"role": "user", "content": "Tell me a story!"}
-             ... ], stream=True):
-             ...     print(chunk, end='')
-         """
-         payload = self._prepare_payload(messages, stream, temperature, max_tokens, stop)
-
-         try:
-             if stream:
-                 return self._stream_response(payload)
-             else:
-                 return self._send_request(payload)
-         except Exception as e:
-             raise LLMError(f"API request failed: {str(e)}")
-
-     def _stream_response(self, payload: Dict[str, Any]) -> Generator[str, None, None]:
-         """Stream the chat response in real-time! ⚡
-
-         Args:
-             payload: The prepared chat payload
-
-         Yields:
-             Streaming chunks of the response
-
-         Raises:
-             LLMError: If the stream request fails 🚫
-
-         Examples:
-             >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
-             >>> for chunk in llm._stream_response(llm._prepare_payload([
-             ...     {"role": "user", "content": "Tell me a story!"}
-             ... ])):
-             ...     print(chunk, end='')
-         """
-         try:
-             with requests.post(self.api_url, json=payload, headers=self.headers, stream=True) as response:
-                 response.raise_for_status()
-                 for line in response.iter_lines():
-                     if line:
-                         if line.strip() == b'data: [DONE]':
-                             break
-                         if line.startswith(b'data: '):
-                             try:
-                                 chunk = json.loads(line.decode('utf-8').removeprefix('data: '))
-                                 if content := chunk.get('choices', [{}])[0].get('delta', {}).get('content'):
-                                     yield content
-                             except json.JSONDecodeError:
-                                 continue
-         except requests.RequestException as e:
-             raise LLMError(f"Stream request failed: {str(e)}")
-
-     def _send_request(self, payload: Dict[str, Any]) -> str:
-         """Send a non-streaming chat request.
-
-         Args:
-             payload: The prepared chat payload
-
-         Returns:
-             The complete response
-
-         Raises:
-             LLMError: If the request fails 🚫
-
-         Examples:
-             >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
-             >>> response = llm._send_request(llm._prepare_payload([
-             ...     {"role": "user", "content": "Tell me a joke!"}
-             ... ]))
-             >>> print(response)
-         """
-         try:
-             response = requests.post(self.api_url, json=payload, headers=self.headers)
-             response.raise_for_status()
-             result = response.json()
-             return result['choices'][0]['message']['content']
-         except requests.RequestException as e:
-             raise LLMError(f"Request failed: {str(e)}")
-         except (KeyError, IndexError) as e:
-             raise LLMError(f"Invalid response format: {str(e)}")
-         except json.JSONDecodeError as e:
-             raise LLMError(f"Invalid JSON response: {str(e)}")
-
-
- class VLM:
-     """Your gateway to vision-language AI magic! 🖼️
-
-     This class lets you:
-     - Chat about images with AI 🎨
-     - Get detailed image descriptions 📝
-     - Answer questions about images 🤔
-     - Stream responses in real-time ⚡
-
-     Examples:
-         >>> from webscout.LLM import VLM
-         >>> vlm = VLM("cogvlm-grounding-generalist")
-         >>> # Chat about an image
-         >>> response = vlm.chat([{
-         ...     "role": "user",
-         ...     "content": [
-         ...         {"type": "image", "image_url": "path/to/image.jpg"},
-         ...         {"type": "text", "text": "What's in this image?"}
-         ...     ]
-         ... }])
-         >>> print(response)
-         'I see a beautiful sunset over mountains...'
-     """
-
-     def __init__(self, model: str, system_message: str = "You are a Helpful AI."):
-         """Get ready for some vision-language magic! 🚀
-
-         Args:
-             model: Your chosen vision model
-             system_message: Set the AI's personality
-
-         Examples:
-             >>> vlm = VLM("cogvlm-grounding-generalist")
-             >>> print(vlm.model)
-             'cogvlm-grounding-generalist'
-         """
-         self.model = model
-         self.api_url = "https://api.deepinfra.com/v1/openai/chat/completions"
-         self.conversation_history = [{"role": "system", "content": system_message}]
-         self.headers = {
-             'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
-             'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-             'Cache-Control': 'no-cache',
-             'Connection': 'keep-alive',
-             'Content-Type': 'application/json',
-             'Origin': 'https://deepinfra.com',
-             'Pragma': 'no-cache',
-             'Referer': 'https://deepinfra.com/',
-             'Sec-Fetch-Dest': 'empty',
-             'Sec-Fetch-Mode': 'cors',
-             'Sec-Fetch-Site': 'same-site',
-             'X-Deepinfra-Source': 'web-embed',
-             'accept': 'text/event-stream',
-             'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
-             'sec-ch-ua-mobile': '?0',
-             'sec-ch-ua-platform': '"macOS"'
-         }
-
-     def chat(
-         self,
-         messages: List[Dict[str, Union[str, List[Dict[str, Union[str, Dict[str, str]]]]]]],
-         stream: bool = False,
-         temperature: float = 0.7,
-         max_tokens: int = 8028,
-     ) -> Union[str, Generator[str, None, None]]:
-         """Chat about images with AI! 🖼️
-
-         This method lets you:
-         - Ask questions about images 🤔
-         - Get detailed descriptions 📝
-         - Stream responses in real-time ⚡
-         - Control response creativity 🎨
-
-         Args:
-             messages: Your chat + image data
-             stream: Want real-time responses?
-             temperature: Creativity level (0-1)
-             max_tokens: Max words to generate
-
-         Returns:
-             Either a complete response or streaming generator
-
-         Raises:
-             LLMError: If something goes wrong 🚫
-
-         Examples:
-             >>> vlm = VLM("cogvlm-grounding-generalist")
-             >>> # Regular chat with image
-             >>> response = vlm.chat([{
-             ...     "role": "user",
-             ...     "content": [
-             ...         {"type": "image", "image_url": "sunset.jpg"},
-             ...         {"type": "text", "text": "Describe this scene"}
-             ...     ]
-             ... }])
-             >>> # Streaming chat
-             >>> for chunk in vlm.chat([...], stream=True):
-             ...     print(chunk, end='')
-         """
-         payload = {
-             "model": self.model,
-             "messages": messages,
-             "stream": stream,
-             "temperature": temperature,
-             "max_tokens": max_tokens
-         }
-
-         try:
-             if stream:
-                 return self._stream_response(payload)
-             else:
-                 return self._send_request(payload)
-         except Exception as e:
-             raise LLMError(f"VLM API request failed: {str(e)}")
-
-     def _stream_response(self, payload: Dict[str, Any]) -> Generator[str, None, None]:
-         """Stream the VLM chat response."""
-         try:
-             with requests.post(self.api_url, json=payload, headers=self.headers, stream=True) as response:
-                 response.raise_for_status()
-                 for line in response.iter_lines():
-                     if line:
-                         if line.strip() == b'data: [DONE]':
-                             break
-                         if line.startswith(b'data: '):
-                             try:
-                                 chunk = json.loads(line.decode('utf-8').removeprefix('data: '))
-                                 if content := chunk.get('choices', [{}])[0].get('delta', {}).get('content'):
-                                     yield content
-                             except json.JSONDecodeError:
-                                 continue
-         except requests.RequestException as e:
-             raise LLMError(f"VLM stream request failed: {str(e)}")
-
-     def _send_request(self, payload: Dict[str, Any]) -> str:
-         """Send a non-streaming VLM chat request."""
-         try:
-             response = requests.post(self.api_url, json=payload, headers=self.headers)
-             response.raise_for_status()
-             result = response.json()
-             return result['choices'][0]['message']['content']
-         except requests.RequestException as e:
-             raise LLMError(f"VLM request failed: {str(e)}")
-         except (KeyError, IndexError) as e:
-             raise LLMError(f"Invalid VLM response format: {str(e)}")
-         except json.JSONDecodeError as e:
-             raise LLMError(f"Invalid VLM JSON response: {str(e)}")
-
-
- def encode_image_to_base64(image_path: str) -> str:
-     """Turn your image into base64 magic! 🎨
-
-     Args:
-         image_path: Where's your image at?
-
-     Returns:
-         Your image as a base64 string ✨
-
-     Raises:
-         IOError: If we can't read your image 🚫
-
-     Examples:
-         >>> from webscout.LLM import encode_image_to_base64
-         >>> image_data = encode_image_to_base64("cool_pic.jpg")
-         >>> print(len(image_data)) # Check the encoded length
-         12345
-     """
-     try:
-         with open(image_path, "rb") as image_file:
-             return base64.b64encode(image_file.read()).decode("utf-8")
-     except IOError as e:
-         raise LLMError(f"Failed to read image file: {str(e)}")
-
-
- if __name__ == "__main__":
-     # Example usage
-     try:
-         # Initialize LLM with Llama 3 model
-         llm = LLM(model="mistralai/Mistral-Small-24B-Instruct-2501")
-
-         # Example messages
-         messages = [
-             {"role": "user", "content": "Write a short poem about AI."}
-         ]
-
-         # Example 1: Non-streaming response
-         print("\nNon-streaming response:")
-         response = llm.chat(messages, stream=False)
-         print(response)
-
-         # Example 2: Streaming response
-         print("\nStreaming response:")
-         for chunk in llm.chat(messages, stream=True):
-             print(chunk, end='', flush=True)
-         print("\n")
-
-     except LLMError as e:
-         print(f"Error: {str(e)}")
-     except KeyboardInterrupt:
-         print("\nOperation cancelled by user")
+ """
+ >>> from webscout.LLM import LLM, VLM
+ >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
+ >>> response = llm.chat([{"role": "user", "content": "What's good?"}])
+ >>> print(response)
+ 'Hey! I'm doing great, thanks for asking! How can I help you today? 😊'
+
+ >>> # For vision tasks
+ >>> vlm = VLM("cogvlm-grounding-generalist")
+ >>> response = vlm.chat([{"role": "user", "content": [{"type": "image", "image_url": "path/to/image.jpg"}, {"type": "text", "text": "What's in this image?"}]}])
+ """
+
+ import requests
+ import base64
+ import json
+ from typing import List, Dict, Union, Generator, Optional, Any
+
+ class LLMError(Exception):
+     """Custom exception for LLM API errors 🚫
+
+     Examples:
+         >>> try:
+         ...     raise LLMError("API key not found!")
+         ... except LLMError as e:
+         ...     print(f"Error: {e}")
+         Error: API key not found!
+     """
+     pass
+
+ class LLM:
+     """A class for chatting with DeepInfra's powerful language models! 🚀
+
+     This class lets you:
+     - Chat with state-of-the-art language models 💬
+     - Stream responses in real-time ⚡
+     - Control temperature and token limits 🎮
+     - Handle system messages and chat history 📝
+
+     Examples:
+         >>> from webscout.LLM import LLM
+         >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
+         >>> response = llm.chat([
+         ...     {"role": "user", "content": "Write a short poem!"}
+         ... ])
+         >>> print(response)
+         'Through starlit skies and morning dew,
+         Nature's beauty, forever new.
+         In every moment, magic gleams,
+         Life's poetry flows like gentle streams.'
+     """
+
+     def __init__(self, model: str, system_message: str = "You are a Helpful AI."):
+         """
+         Initialize the LLM client.
+
+         Args:
+             model: The model identifier (e.g., "meta-llama/Meta-Llama-3-70B-Instruct")
+             system_message: The system message to use for the conversation
+
+         Examples:
+             >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
+             >>> print(llm.model)
+             'meta-llama/Meta-Llama-3-70B-Instruct'
+         """
+         self.model = model
+         self.api_url = "https://api.deepinfra.com/v1/openai/chat/completions"
+         self.conversation_history = [{"role": "system", "content": system_message}]
+         self.headers = {
+             'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
+             'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+             'Cache-Control': 'no-cache',
+             'Connection': 'keep-alive',
+             'Content-Type': 'application/json',
+             'Origin': 'https://deepinfra.com',
+             'Pragma': 'no-cache',
+             'Referer': 'https://deepinfra.com/',
+             'Sec-Fetch-Dest': 'empty',
+             'Sec-Fetch-Mode': 'cors',
+             'Sec-Fetch-Site': 'same-site',
+             'X-Deepinfra-Source': 'web-embed',
+             'accept': 'text/event-stream',
+             'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
+             'sec-ch-ua-mobile': '?0',
+             'sec-ch-ua-platform': '"macOS"'
+         }
+
+     def _prepare_payload(
+         self,
+         messages: List[Dict[str, str]],
+         stream: bool = False,
+         temperature: float = 0.7,
+         max_tokens: int = 8028,
+         stop: Optional[List[str]] = None,
+     ) -> Dict[str, Any]:
+         """Prepare the chat payload with all the right settings! 🎯
+
+         Args:
+             messages: Your chat messages (role & content)
+             stream: Want real-time responses? Set True! ⚡
+             temperature: Creativity level (0-1) 🎨
+             max_tokens: Max words to generate 📝
+             stop: Words to stop at (optional) 🛑
+
+         Returns:
+             Dict with all the API settings ready to go! 🚀
+
+         Examples:
+             >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
+             >>> payload = llm._prepare_payload([
+             ...     {"role": "user", "content": "Hi!"}
+             ... ])
+             >>> print(payload['model'])
+             'meta-llama/Meta-Llama-3-70B-Instruct'
+         """
+         return {
+             'model': self.model,
+             'messages': messages,
+             'temperature': temperature,
+             'max_tokens': max_tokens,
+             'stop': stop or [],
+             'stream': stream
+         }
+
+     def chat(
+         self,
+         messages: List[Dict[str, str]],
+         stream: bool = False,
+         temperature: float = 0.7,
+         max_tokens: int = 8028,
+         stop: Optional[List[str]] = None,
+     ) -> Union[str, Generator[str, None, None]]:
+         """Start chatting with the AI! 💬
+
+         This method is your gateway to:
+         - Having awesome conversations 🗣️
+         - Getting creative responses 🎨
+         - Streaming real-time replies ⚡
+         - Controlling the output style 🎮
+
+         Args:
+             messages: Your chat messages (role & content)
+             stream: Want real-time responses? Set True!
+             temperature: Creativity level (0-1)
+             max_tokens: Max words to generate
+             stop: Words to stop at (optional)
+
+         Returns:
+             Either a complete response or streaming generator
+
+         Raises:
+             LLMError: If something goes wrong 🚫
+
+         Examples:
+             >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
+             >>> # Regular chat
+             >>> response = llm.chat([
+             ...     {"role": "user", "content": "Tell me a joke!"}
+             ... ])
+             >>> # Streaming chat
+             >>> for chunk in llm.chat([
+             ...     {"role": "user", "content": "Tell me a story!"}
+             ... ], stream=True):
+             ...     print(chunk, end='')
+         """
+         payload = self._prepare_payload(messages, stream, temperature, max_tokens, stop)
+
+         try:
+             if stream:
+                 return self._stream_response(payload)
+             else:
+                 return self._send_request(payload)
+         except Exception as e:
+             raise LLMError(f"API request failed: {str(e)}")
+
+     def _stream_response(self, payload: Dict[str, Any]) -> Generator[str, None, None]:
+         """Stream the chat response in real-time! ⚡
+
+         Args:
+             payload: The prepared chat payload
+
+         Yields:
+             Streaming chunks of the response
+
+         Raises:
+             LLMError: If the stream request fails 🚫
+
+         Examples:
+             >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
+             >>> for chunk in llm._stream_response(llm._prepare_payload([
+             ...     {"role": "user", "content": "Tell me a story!"}
+             ... ])):
+             ...     print(chunk, end='')
+         """
+         try:
+             with requests.post(self.api_url, json=payload, headers=self.headers, stream=True) as response:
+                 response.raise_for_status()
+                 for line in response.iter_lines():
+                     if line:
+                         if line.strip() == b'data: [DONE]':
+                             break
+                         if line.startswith(b'data: '):
+                             try:
+                                 chunk = json.loads(line.decode('utf-8').removeprefix('data: '))
+                                 if content := chunk.get('choices', [{}])[0].get('delta', {}).get('content'):
+                                     yield content
+                             except json.JSONDecodeError:
+                                 continue
+         except requests.RequestException as e:
+             raise LLMError(f"Stream request failed: {str(e)}")
+
+     def _send_request(self, payload: Dict[str, Any]) -> str:
+         """Send a non-streaming chat request.
+
+         Args:
+             payload: The prepared chat payload
+
+         Returns:
+             The complete response
+
+         Raises:
+             LLMError: If the request fails 🚫
+
+         Examples:
+             >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
+             >>> response = llm._send_request(llm._prepare_payload([
+             ...     {"role": "user", "content": "Tell me a joke!"}
+             ... ]))
+             >>> print(response)
+         """
+         try:
+             response = requests.post(self.api_url, json=payload, headers=self.headers)
+             response.raise_for_status()
+             result = response.json()
+             return result['choices'][0]['message']['content']
+         except requests.RequestException as e:
+             raise LLMError(f"Request failed: {str(e)}")
+         except (KeyError, IndexError) as e:
+             raise LLMError(f"Invalid response format: {str(e)}")
+         except json.JSONDecodeError as e:
+             raise LLMError(f"Invalid JSON response: {str(e)}")
+
+
+ class VLM:
+     """Your gateway to vision-language AI magic! 🖼️
+
+     This class lets you:
+     - Chat about images with AI 🎨
+     - Get detailed image descriptions 📝
+     - Answer questions about images 🤔
+     - Stream responses in real-time ⚡
+
+     Examples:
+         >>> from webscout.LLM import VLM
+         >>> vlm = VLM("cogvlm-grounding-generalist")
+         >>> # Chat about an image
+         >>> response = vlm.chat([{
+         ...     "role": "user",
+         ...     "content": [
+         ...         {"type": "image", "image_url": "path/to/image.jpg"},
+         ...         {"type": "text", "text": "What's in this image?"}
+         ...     ]
+         ... }])
+         >>> print(response)
+         'I see a beautiful sunset over mountains...'
+     """
+
+     def __init__(self, model: str, system_message: str = "You are a Helpful AI."):
+         """Get ready for some vision-language magic! 🚀
+
+         Args:
+             model: Your chosen vision model
+             system_message: Set the AI's personality
+
+         Examples:
+             >>> vlm = VLM("cogvlm-grounding-generalist")
+             >>> print(vlm.model)
+             'cogvlm-grounding-generalist'
+         """
+         self.model = model
+         self.api_url = "https://api.deepinfra.com/v1/openai/chat/completions"
+         self.conversation_history = [{"role": "system", "content": system_message}]
+         self.headers = {
+             'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
+             'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+             'Cache-Control': 'no-cache',
+             'Connection': 'keep-alive',
+             'Content-Type': 'application/json',
+             'Origin': 'https://deepinfra.com',
+             'Pragma': 'no-cache',
+             'Referer': 'https://deepinfra.com/',
+             'Sec-Fetch-Dest': 'empty',
+             'Sec-Fetch-Mode': 'cors',
+             'Sec-Fetch-Site': 'same-site',
+             'X-Deepinfra-Source': 'web-embed',
+             'accept': 'text/event-stream',
+             'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
+             'sec-ch-ua-mobile': '?0',
+             'sec-ch-ua-platform': '"macOS"'
+         }
+
+     def chat(
+         self,
+         messages: List[Dict[str, Union[str, List[Dict[str, Union[str, Dict[str, str]]]]]]],
+         stream: bool = False,
+         temperature: float = 0.7,
+         max_tokens: int = 8028,
+     ) -> Union[str, Generator[str, None, None]]:
+         """Chat about images with AI! 🖼️
+
+         This method lets you:
+         - Ask questions about images 🤔
+         - Get detailed descriptions 📝
+         - Stream responses in real-time ⚡
+         - Control response creativity 🎨
+
+         Args:
+             messages: Your chat + image data
+             stream: Want real-time responses?
+             temperature: Creativity level (0-1)
+             max_tokens: Max words to generate
+
+         Returns:
+             Either a complete response or streaming generator
+
+         Raises:
+             LLMError: If something goes wrong 🚫
+
+         Examples:
+             >>> vlm = VLM("cogvlm-grounding-generalist")
+             >>> # Regular chat with image
+             >>> response = vlm.chat([{
+             ...     "role": "user",
+             ...     "content": [
+             ...         {"type": "image", "image_url": "sunset.jpg"},
+             ...         {"type": "text", "text": "Describe this scene"}
+             ...     ]
+             ... }])
+             >>> # Streaming chat
+             >>> for chunk in vlm.chat([...], stream=True):
+             ...     print(chunk, end='')
+         """
+         payload = {
+             "model": self.model,
+             "messages": messages,
+             "stream": stream,
+             "temperature": temperature,
+             "max_tokens": max_tokens
+         }
+
+         try:
+             if stream:
+                 return self._stream_response(payload)
+             else:
+                 return self._send_request(payload)
+         except Exception as e:
+             raise LLMError(f"VLM API request failed: {str(e)}")
+
+     def _stream_response(self, payload: Dict[str, Any]) -> Generator[str, None, None]:
+         """Stream the VLM chat response."""
+         try:
+             with requests.post(self.api_url, json=payload, headers=self.headers, stream=True) as response:
+                 response.raise_for_status()
+                 for line in response.iter_lines():
+                     if line:
+                         if line.strip() == b'data: [DONE]':
+                             break
+                         if line.startswith(b'data: '):
+                             try:
+                                 chunk = json.loads(line.decode('utf-8').removeprefix('data: '))
+                                 if content := chunk.get('choices', [{}])[0].get('delta', {}).get('content'):
+                                     yield content
+                             except json.JSONDecodeError:
+                                 continue
+         except requests.RequestException as e:
+             raise LLMError(f"VLM stream request failed: {str(e)}")
+
+     def _send_request(self, payload: Dict[str, Any]) -> str:
+         """Send a non-streaming VLM chat request."""
+         try:
+             response = requests.post(self.api_url, json=payload, headers=self.headers)
+             response.raise_for_status()
+             result = response.json()
+             return result['choices'][0]['message']['content']
+         except requests.RequestException as e:
+             raise LLMError(f"VLM request failed: {str(e)}")
+         except (KeyError, IndexError) as e:
+             raise LLMError(f"Invalid VLM response format: {str(e)}")
+         except json.JSONDecodeError as e:
+             raise LLMError(f"Invalid VLM JSON response: {str(e)}")
+
+
+ def encode_image_to_base64(image_path: str) -> str:
+     """Turn your image into base64 magic! 🎨
+
+     Args:
+         image_path: Where's your image at?
+
+     Returns:
+         Your image as a base64 string ✨
+
+     Raises:
+         IOError: If we can't read your image 🚫
+
+     Examples:
+         >>> from webscout.LLM import encode_image_to_base64
+         >>> image_data = encode_image_to_base64("cool_pic.jpg")
+         >>> print(len(image_data)) # Check the encoded length
+         12345
+     """
+     try:
+         with open(image_path, "rb") as image_file:
+             return base64.b64encode(image_file.read()).decode("utf-8")
+     except IOError as e:
+         raise LLMError(f"Failed to read image file: {str(e)}")
+
+
+ if __name__ == "__main__":
+     # Example usage
+     try:
+         # Initialize LLM with Llama 3 model
+         llm = LLM(model="mistralai/Mistral-Small-24B-Instruct-2501")
+
+         # Example messages
+         messages = [
+             {"role": "user", "content": "Write a short poem about AI."}
+         ]
+
+         # Example 1: Non-streaming response
+         print("\nNon-streaming response:")
+         response = llm.chat(messages, stream=False)
+         print(response)
+
+         # Example 2: Streaming response
+         print("\nStreaming response:")
+         for chunk in llm.chat(messages, stream=True):
+             print(chunk, end='', flush=True)
+         print("\n")
+
+     except LLMError as e:
+         print(f"Error: {str(e)}")
+     except KeyboardInterrupt:
+         print("\nOperation cancelled by user")