webscout-4.3-py3-none-any.whl → webscout-4.5-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries.

Potentially problematic release.

webscout/GoogleS.py ADDED
@@ -0,0 +1,342 @@
+ import os
+ import random
+ import sys
+ import time
+ import ssl
+
+ if sys.version_info[0] > 2:
+     from http.cookiejar import LWPCookieJar
+     from urllib.request import Request, urlopen
+     from urllib.parse import quote_plus, urlparse, parse_qs
+ else:
+     from cookielib import LWPCookieJar
+     from urllib import quote_plus
+     from urllib2 import Request, urlopen
+     from urlparse import urlparse, parse_qs
+
+ try:
+     from bs4 import BeautifulSoup
+     is_bs4 = True
+ except ImportError:
+     from BeautifulSoup import BeautifulSoup  # type: ignore
+     is_bs4 = False
+
+ __all__ = [
+
+     # Main search function.
+     'search',
+
+     # Shortcut for "get lucky" search.
+     'lucky',
+
+     # Miscellaneous utility functions.
+     'get_random_user_agent', 'get_tbs',
+ ]
+
+ # URL templates to make Google searches.
+ url_home = "https://www.google.%(tld)s/"
+ url_search = "https://www.google.%(tld)s/search?hl=%(lang)s&q=%(query)s&" \
+              "btnG=Google+Search&tbs=%(tbs)s&safe=%(safe)s&" \
+              "cr=%(country)s"
+ url_next_page = "https://www.google.%(tld)s/search?hl=%(lang)s&q=%(query)s&" \
+                 "start=%(start)d&tbs=%(tbs)s&safe=%(safe)s&" \
+                 "cr=%(country)s"
+ url_search_num = "https://www.google.%(tld)s/search?hl=%(lang)s&q=%(query)s&" \
+                  "num=%(num)d&btnG=Google+Search&tbs=%(tbs)s&safe=%(safe)s&" \
+                  "cr=%(country)s"
+ url_next_page_num = "https://www.google.%(tld)s/search?hl=%(lang)s&" \
+                     "q=%(query)s&num=%(num)d&start=%(start)d&tbs=%(tbs)s&" \
+                     "safe=%(safe)s&cr=%(country)s"
+ url_parameters = (
+     'hl', 'q', 'num', 'btnG', 'start', 'tbs', 'safe', 'cr')
+
+ # Cookie jar. Stored at the user's home folder.
+ # If the cookie jar is inaccessible, the errors are ignored.
+ home_folder = os.getenv('HOME')
+ if not home_folder:
+     home_folder = os.getenv('USERHOME')
+     if not home_folder:
+         home_folder = '.'  # Use the current folder on error.
+ cookie_jar = LWPCookieJar(os.path.join(home_folder, '.google-cookie'))
+ try:
+     cookie_jar.load()
+ except Exception:
+     pass
+
+ # Default user agent, unless instructed by the user to change it.
+ USER_AGENT = 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0)'
+
+ try:
+     install_folder = os.path.abspath(os.path.split(__file__)[0])
+     try:
+         user_agents_file = os.path.join(install_folder, 'user_agents.txt.gz')
+         import gzip
+         fp = gzip.open(user_agents_file, 'rb')
+         try:
+             user_agents_list = [_.strip() for _ in fp.readlines()]
+         finally:
+             fp.close()
+             del fp
+     except Exception:
+         user_agents_file = os.path.join(install_folder, 'user_agents.txt')
+         with open(user_agents_file) as fp:
+             user_agents_list = [_.strip() for _ in fp.readlines()]
+ except Exception:
+     user_agents_list = [USER_AGENT]
+
+
+ # Get a random user agent.
+ def get_random_user_agent():
+     """
+     Get a random user agent string.
+
+     :rtype: str
+     :return: Random user agent string.
+     """
+     return random.choice(user_agents_list)
+
+
+ # Helper function to format the tbs parameter.
+ def get_tbs(from_date, to_date):
+     """
+     Helper function to format the tbs parameter.
+
+     :param datetime.date from_date: Python date object.
+     :param datetime.date to_date: Python date object.
+
+     :rtype: str
+     :return: Dates encoded in tbs format.
+     """
+     from_date = from_date.strftime('%m/%d/%Y')
+     to_date = to_date.strftime('%m/%d/%Y')
+     return 'cdr:1,cd_min:%(from_date)s,cd_max:%(to_date)s' % vars()
+
+
+ # Request the given URL and return the response page, using the cookie jar.
+ # If the cookie jar is inaccessible, the errors are ignored.
+ def get_page(url, user_agent=None, verify_ssl=True):
+     """
+     Request the given URL and return the response page, using the cookie jar.
+
+     :param str url: URL to retrieve.
+     :param str user_agent: User agent for the HTTP requests.
+         Use None for the default.
+     :param bool verify_ssl: Verify the SSL certificate to prevent
+         traffic interception attacks. Defaults to True.
+
+     :rtype: str
+     :return: Web page retrieved for the given URL.
+
+     :raises IOError: An exception is raised on error.
+     :raises urllib2.URLError: An exception is raised on error.
+     :raises urllib2.HTTPError: An exception is raised on error.
+     """
+     if user_agent is None:
+         user_agent = USER_AGENT
+     request = Request(url)
+     request.add_header('User-Agent', user_agent)
+     cookie_jar.add_cookie_header(request)
+     if verify_ssl:
+         response = urlopen(request)
+     else:
+         context = ssl._create_unverified_context()
+         response = urlopen(request, context=context)
+     cookie_jar.extract_cookies(response, request)
+     html = response.read()
+     response.close()
+     try:
+         cookie_jar.save()
+     except Exception:
+         pass
+     return html
+
+
+ # Filter links found in the Google result pages HTML code.
+ # Returns None if the link doesn't yield a valid result.
+ def filter_result(link):
+     try:
+
+         # Decode hidden URLs.
+         if link.startswith('/url?'):
+             o = urlparse(link, 'http')
+             link = parse_qs(o.query)['q'][0]
+
+         # Valid results are absolute URLs not pointing to a Google domain,
+         # like images.google.com or googleusercontent.com for example.
+         # TODO this could be improved!
+         o = urlparse(link, 'http')
+         if o.netloc and 'google' not in o.netloc:
+             return link
+
+     # On error, return None.
+     except Exception:
+         pass
+
+
+ # Returns a generator that yields URLs.
+ def search(query, tld='com', lang='en', tbs='0', safe='off', num=10, start=0,
+            stop=None, pause=2.0, country='', extra_params=None,
+            user_agent=None, verify_ssl=True):
+     """
+     Search the given query string using Google.
+
+     :param str query: Query string. Must NOT be url-encoded.
+     :param str tld: Top level domain.
+     :param str lang: Language.
+     :param str tbs: Time limits (i.e "qdr:h" => last hour,
+         "qdr:d" => last 24 hours, "qdr:m" => last month).
+     :param str safe: Safe search.
+     :param int num: Number of results per page.
+     :param int start: First result to retrieve.
+     :param int stop: Last result to retrieve.
+         Use None to keep searching forever.
+     :param float pause: Lapse to wait between HTTP requests.
+         A lapse too long will make the search slow, but a lapse too short may
+         cause Google to block your IP. Your mileage may vary!
+     :param str country: Country or region to focus the search on. Similar to
+         changing the TLD, but does not yield exactly the same results.
+         Only Google knows why...
+     :param dict extra_params: A dictionary of extra HTTP GET
+         parameters, which must be URL encoded. For example if you don't want
+         Google to filter similar results you can set the extra_params to
+         {'filter': '0'} which will append '&filter=0' to every query.
+     :param str user_agent: User agent for the HTTP requests.
+         Use None for the default.
+     :param bool verify_ssl: Verify the SSL certificate to prevent
+         traffic interception attacks. Defaults to True.
+
+     :rtype: generator of str
+     :return: Generator (iterator) that yields found URLs.
+         If the stop parameter is None the iterator will loop forever.
+     """
+     # Set of hashes for the results found.
+     # This is used to avoid repeated results.
+     hashes = set()
+
+     # Count the number of links yielded.
+     count = 0
+
+     # Prepare the search string.
+     query = quote_plus(query)
+
+     # If no extra_params is given, create an empty dictionary.
+     # We should avoid using an empty dictionary as a default value
+     # in a function parameter in Python.
+     if not extra_params:
+         extra_params = {}
+
+     # Check extra_params for overlapping.
+     for builtin_param in url_parameters:
+         if builtin_param in extra_params.keys():
+             raise ValueError(
+                 'GET parameter "%s" is overlapping with '
+                 'the built-in GET parameter' % builtin_param
+             )
+
+     # Grab the cookie from the home page.
+     get_page(url_home % vars(), user_agent, verify_ssl)
+
+     # Prepare the URL of the first request.
+     if start:
+         if num == 10:
+             url = url_next_page % vars()
+         else:
+             url = url_next_page_num % vars()
+     else:
+         if num == 10:
+             url = url_search % vars()
+         else:
+             url = url_search_num % vars()
+
+     # Loop until we reach the maximum result, if any (otherwise, loop forever).
+     while not stop or count < stop:
+
+         # Remember last count to detect the end of results.
+         last_count = count
+
+         # Append extra GET parameters to the URL.
+         # This is done on every iteration because we're
+         # rebuilding the entire URL at the end of this loop.
+         for k, v in extra_params.items():
+             k = quote_plus(k)
+             v = quote_plus(v)
+             url = url + ('&%s=%s' % (k, v))
+
+         # Sleep between requests.
+         # Keeps Google from banning you for making too many requests.
+         time.sleep(pause)
+
+         # Request the Google Search results page.
+         html = get_page(url, user_agent, verify_ssl)
+
+         # Parse the response and get every anchored URL.
+         if is_bs4:
+             soup = BeautifulSoup(html, 'html.parser')
+         else:
+             soup = BeautifulSoup(html)
+         try:
+             anchors = soup.find(id='search').findAll('a')
+             # Sometimes (depending on the User-agent) there is
+             # no id "search" in html response...
+         except AttributeError:
+             # Remove links of the top bar.
+             gbar = soup.find(id='gbar')
+             if gbar:
+                 gbar.clear()
+             anchors = soup.findAll('a')
+
+         # Process every anchored URL.
+         for a in anchors:
+
+             # Get the URL from the anchor tag.
+             try:
+                 link = a['href']
+             except KeyError:
+                 continue
+
+             # Filter invalid links and links pointing to Google itself.
+             link = filter_result(link)
+             if not link:
+                 continue
+
+             # Discard repeated results.
+             h = hash(link)
+             if h in hashes:
+                 continue
+             hashes.add(h)
+
+             # Yield the result.
+             yield link
+
+             # Increase the results counter.
+             # If we reached the limit, stop.
+             count += 1
+             if stop and count >= stop:
+                 return
+
+         # End if there are no more results.
+         # XXX TODO review this logic, not sure if this is still true!
+         if last_count == count:
+             break
+
+         # Prepare the URL for the next request.
+         start += num
+         if num == 10:
+             url = url_next_page % vars()
+         else:
+             url = url_next_page_num % vars()
+
+
+ # Shortcut to single-item search.
+ # Evaluates the iterator to return the single URL as a string.
+ def lucky(*args, **kwargs):
+     """
+     Shortcut to single-item search.
+
+     Same arguments as the main search function, but the return value changes.
+
+     :rtype: str
+     :return: URL found by Google.
+     """
+     return next(search(*args, **kwargs))
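For orientation, here is a minimal sketch of driving the `search` generator and the `get_tbs` helper added above. The import path follows the new file `webscout/GoogleS.py`; the query string and the 30-day date window are illustrative only.

```python
from datetime import date, timedelta

# Import from the module added in this release (webscout/GoogleS.py).
from webscout.GoogleS import search, get_tbs, get_random_user_agent

# get_tbs() encodes a custom date range for Google's tbs parameter;
# here, roughly the last 30 days.
tbs = get_tbs(date.today() - timedelta(days=30), date.today())

# search() yields result URLs lazily; stop=10 bounds it to ten results,
# and pause throttles requests so Google is less likely to block the IP.
for url in search("web scraping tutorial", tbs=tbs, stop=10, pause=2.0,
                  user_agent=get_random_user_agent()):
    print(url)
```

Because the module scrapes live Google result pages, `pause` and `user_agent` are the main knobs for staying under Google's rate limits.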
webscout/Provider/Andi.py ADDED
@@ -0,0 +1,275 @@
+ import time
+ import uuid
+ from selenium import webdriver
+ from selenium.webdriver.chrome.options import Options
+ from selenium.webdriver.common.by import By
+ from selenium.webdriver.support import expected_conditions as EC
+ from selenium.webdriver.support.ui import WebDriverWait
+ import click
+ import requests
+ from requests import get
+ from uuid import uuid4
+ from re import findall
+ from requests.exceptions import RequestException
+ from curl_cffi.requests import get, RequestsError
+ import g4f
+ from random import randint
+ from PIL import Image
+ import io
+ import re
+ import json
+ import yaml
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+ from typing import Any, AsyncGenerator, Dict
+ import logging
+ import httpx
+ from webscout import WEBS
+ from rich import print
+
+ class AndiSearch(Provider):
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+     ):
+         """Instantiates AndiSearch
+
+         Args:
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+             timeout (int, optional): Http request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): Http request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+         """
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.chat_endpoint = "https://write.andisearch.com/v1/write_streaming"
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+         self.headers = {
+             "accept": "text/event-stream",
+             "accept-encoding": "gzip, deflate, br, zstd",
+             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+             "andi-auth-key": "andi-summarizer",
+             "andi-origin": "x-andi-origin",
+             "authorization": str(uuid4()),
+             "content-type": "application/json",
+             "dnt": "1",
+             "origin": "https://andisearch.com",
+             "priority": "u=1, i",
+             "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+             "sec-ch-ua-mobile": "?0",
+             "sec-ch-ua-platform": '"Windows"',
+             "sec-fetch-dest": "empty",
+             "sec-fetch-mode": "cors",
+             "sec-fetch-site": "same-site",
+             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
+             "x-amz-date": "20240730T031106Z",
+             "x-amz-security-token": str(uuid4()),
+         }
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         self.session.headers.update(self.headers)
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> dict:
+         """Chat with AI
+
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             dict : {}
+         ```json
+         {
+             "id": "chatcmpl-TaREJpBZsRVQFRFic1wIA7Q7XfnaD",
+             "object": "chat.completion",
+             "created": 1704623244,
+             "model": "gpt-3.5-turbo",
+             "usage": {
+                 "prompt_tokens": 0,
+                 "completion_tokens": 0,
+                 "total_tokens": 0
+             },
+             "choices": [
+                 {
+                     "message": {
+                         "role": "assistant",
+                         "content": "Hello! How can I assist you today?"
+                     },
+                     "finish_reason": "stop",
+                     "index": 0
+                 }
+             ]
+         }
+         ```
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         # Initialize the webscout instance
+         webs = WEBS()
+
+         # Fetch search results
+         search_query = prompt
+         search_results = webs.text(search_query, max_results=7)
+
+         # Format the search results into the required serp payload structure
+         serp_payload = {
+             "query": search_query,
+             "serp": {
+                 "results_type": "Search",
+                 "answer": "",
+                 "type": "navigation",
+                 "title": "",
+                 "description": "",
+                 "image": "",
+                 "link": "",
+                 "source": "liftndrift.com",
+                 "engine": "andi-b",
+                 "results": [
+                     {
+                         "title": result["title"],
+                         "link": result["href"],
+                         "desc": result["body"],
+                         "image": "",
+                         "type": "website",
+                         "source": result["href"].split("//")[1].split("/")[0]  # Extract the domain name
+                     }
+                     for result in search_results
+                 ]
+             }
+         }
+         self.session.headers.update(self.headers)
+         payload = serp_payload
+
+         def for_stream():
+             response = self.session.post(
+                 self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
+             )
+             if not response.ok:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                 )
+
+             streaming_text = ""
+             for value in response.iter_lines(
+                 decode_unicode=True,
+                 chunk_size=self.stream_chunk_size,
+                 delimiter="\n",
+             ):
+                 try:
+                     if bool(value):
+                         streaming_text += value + ("\n" if stream else "")
+                         resp = dict(text=streaming_text)
+                         self.last_response.update(resp)
+                         yield value if raw else resp
+                 except json.decoder.JSONDecodeError:
+                     pass
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+
+         def for_non_stream():
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
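A short usage sketch for the new provider, assuming webscout 4.5 and its dependencies (curl_cffi, selenium, g4f) are installed and the andisearch.com endpoint is reachable; the prompts are placeholders.

```python
from webscout.Provider import AndiSearch

# AndiSearch fetches WEBS() search results for the prompt and streams a
# summarized answer from the andisearch.com write endpoint.
ai = AndiSearch(timeout=30)

# Non-streaming: chat() returns the complete response text.
print(ai.chat("What is Python?"))

# Streaming: each yielded value is the text accumulated so far,
# not an incremental delta (see streaming_text in ask() above).
for snapshot in ai.chat("Explain HTTP in one paragraph", stream=True):
    latest = snapshot
print(latest)
```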
webscout/Provider/__init__.py CHANGED
@@ -38,6 +38,7 @@ from .Geminipro import GEMINIPRO
  from .Geminiflash import GEMINIFLASH
  from .OLLAMA import OLLAMA
  from .FreeGemini import FreeGemini
+ from .Andi import AndiSearch
  __all__ = [
      'ThinkAnyAI',
      'Xjai',
@@ -78,7 +79,8 @@ __all__ = [
      'GEMINIPRO',
      'GEMINIFLASH',
      'OLLAMA',
-     'FreeGemini'
+     'FreeGemini',
+     'AndiSearch'


  ]
webscout/__init__.py CHANGED
@@ -5,16 +5,16 @@ from .DWEBS import *
  from .transcriber import transcriber
  from .voice import play_audio
  from .websx_search import WEBSX
-
  from .LLM import VLM, LLM
  from .YTdownloader import *
- # from .Local import *
+
  import g4f
  from .YTdownloader import *
  from .Provider import *
  from .Extra import gguf
  from .Extra import autollama
  from .Extra import weather_ascii, weather
+ from .Agents import *
  __repo__ = "https://github.com/OE-LUCIFER/Webscout"

  webai = [
@@ -44,7 +44,8 @@ webai = [
      "vtlchat",
      "geminiflash",
      "geminipro",
-     "ollama"
+     "ollama",
+     "andi",
  ]

  gpt4free_providers = [
@@ -58,12 +59,15 @@ __all__ = [
      "WEBS",
      "AsyncWEBS",
      "__version__",
-     "DeepWEBS",
+     "DWEBS",
      "transcriber",
      "play_audio",
      "TempMailClient",
      "TemporaryPhoneNumber",
      "LLM",
+     "YTdownloader",
+     "WEBSX",
+     "VLM",
      # Localai models and utilities
      # "Model",
      # "Thread",