webscout 1.4.5__py3-none-any.whl → 1.4.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

webscout/AI.py CHANGED
@@ -3277,6 +3277,7 @@ class OPENGPT:
             str: Message extracted
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["content"]
 class AsyncOPENGPT(AsyncProvider):
     def __init__(
         self,
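
Note: the one-line addition above is a bug fix: `OPENGPT.get_message` previously fell through after the assertion and returned None. A minimal sketch of the fixed call path (default construction assumed):

    from webscout.AI import OPENGPT

    bot = OPENGPT()                    # assumes the bundled default assistant works
    response = bot.ask("Hello")        # providers return a dict
    print(bot.get_message(response))   # 1.4.6: response["content"]; 1.4.5: None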
webscout/AIauto.py ADDED
@@ -0,0 +1,452 @@
+from webscout.AI import Provider, AsyncProvider
+from webscout.AI import OPENGPT, AsyncOPENGPT
+from webscout.AI import KOBOLDAI, AsyncKOBOLDAI
+from webscout.AI import PhindSearch, AsyncPhindSearch
+from webscout.AI import LLAMA2, AsyncLLAMA2
+from webscout.AI import BLACKBOXAI, AsyncBLACKBOXAI
+from webscout.AI import PERPLEXITY
+from webscout.AI import ThinkAnyAI
+from webscout.AI import YouChat
+from webscout.AI import YEPCHAT
+from webscout.g4f import GPT4FREE, AsyncGPT4FREE
+from webscout.g4f import TestProviders
+from webscout.exceptions import AllProvidersFailure
+from webscout.async_providers import mapper as async_provider_map
+from typing import AsyncGenerator
+
+from typing import Union
+from typing import Any
+import logging
+
+
+provider_map: dict[
+    str, Union[OPENGPT, KOBOLDAI, PhindSearch, LLAMA2, BLACKBOXAI, PERPLEXITY, GPT4FREE, ThinkAnyAI, YEPCHAT, YouChat]
+] = {
+    "PhindSearch": PhindSearch,
+    "perplexity": PERPLEXITY,
+    "opengpt": OPENGPT,
+    "koboldai": KOBOLDAI,
+    "llama2": LLAMA2,
+    "blackboxai": BLACKBOXAI,
+    "gpt4free": GPT4FREE,
+    "thinkany": ThinkAnyAI,
+    "yepchat": YEPCHAT,
+    "you": YouChat,
+}
+
+
+class AUTO(Provider):
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        exclude: list[str] = [],
+    ):
+        """Instantiates AUTO
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            exclude (list[str], optional): List of providers to be excluded. Defaults to [].
+        """
+        self.provider: Union[OPENGPT, KOBOLDAI, PhindSearch, LLAMA2, BLACKBOXAI, PERPLEXITY, GPT4FREE, ThinkAnyAI, YEPCHAT, YouChat] = None
+        self.provider_name: str = None
+        self.is_conversation = is_conversation
+        self.max_tokens = max_tokens
+        self.timeout = timeout
+        self.intro = intro
+        self.filepath = filepath
+        self.update_file = update_file
+        self.proxies = proxies
+        self.history_offset = history_offset
+        self.act = act
+        self.exclude = exclude
+
+    @property
+    def last_response(self) -> dict[str, Any]:
+        return self.provider.last_response
+
+    @property
+    def conversation(self) -> object:
+        return self.provider.conversation
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        run_new_test: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+            run_new_test (bool, optional): Perform new test on g4f-based providers. Defaults to False.
+        Returns:
+            dict : {}
+        """
+        ask_kwargs: dict[str, Union[str, bool]] = {
+            "prompt": prompt,
+            "stream": stream,
+            "raw": raw,
+            "optimizer": optimizer,
+            "conversationally": conversationally,
+        }
+
+        # tgpt-based providers
+        for provider_name, provider_obj in provider_map.items():
+            # continue
+            if provider_name in self.exclude:
+                continue
+            try:
+                self.provider_name = f"tgpt-{provider_name}"
+                self.provider = provider_obj(
+                    is_conversation=self.is_conversation,
+                    max_tokens=self.max_tokens,
+                    timeout=self.timeout,
+                    intro=self.intro,
+                    filepath=self.filepath,
+                    update_file=self.update_file,
+                    proxies=self.proxies,
+                    history_offset=self.history_offset,
+                    act=self.act,
+                )
+
+                def for_stream():
+                    for chunk in self.provider.ask(**ask_kwargs):
+                        yield chunk
+
+                def for_non_stream():
+                    return self.provider.ask(**ask_kwargs)
+
+                return for_stream() if stream else for_non_stream()
+
+            except Exception as e:
+                logging.debug(
+                    f"Failed to generate response using provider {provider_name} - {e}"
+                )
+
+        # g4f-based providers
+
+        for provider_info in TestProviders(timeout=self.timeout).get_results(
+            run=run_new_test
+        ):
+            if provider_info["name"] in self.exclude:
+                continue
+            try:
+                self.provider_name = f"g4f-{provider_info['name']}"
+                self.provider = GPT4FREE(
+                    provider=provider_info["name"],
+                    is_conversation=self.is_conversation,
+                    max_tokens=self.max_tokens,
+                    intro=self.intro,
+                    filepath=self.filepath,
+                    update_file=self.update_file,
+                    proxies=self.proxies,
+                    history_offset=self.history_offset,
+                    act=self.act,
+                )
+
+                def for_stream():
+                    for chunk in self.provider.ask(**ask_kwargs):
+                        yield chunk
+
+                def for_non_stream():
+                    return self.provider.ask(**ask_kwargs)
+
+                return for_stream() if stream else for_non_stream()
+
+            except Exception as e:
+                logging.debug(
+                    f"Failed to generate response using GPT4FREE-based provider {provider_info['name']} - {e}"
+                )
+
+        raise AllProvidersFailure(
+            "None of the providers generated response successfully."
+        )
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        run_new_test: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+            run_new_test (bool, optional): Perform new test on g4f-based providers. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt,
+                True,
+                optimizer=optimizer,
+                conversationally=conversationally,
+                run_new_test=run_new_test,
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            ask_response = self.ask(
+                prompt,
+                False,
+                optimizer=optimizer,
+                conversationally=conversationally,
+                run_new_test=run_new_test,
+            )
+            return self.get_message(ask_response)
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert self.provider is not None, "Chat with AI first"
+        return self.provider.get_message(response)
+
+
+class AsyncAUTO(AsyncProvider):
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        exclude: list[str] = [],
+    ):
+        """Instantiates AsyncAUTO
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            exclude (list[str], optional): List of providers to be excluded. Defaults to [].
+        """
+        self.provider: Union[
+            AsyncOPENGPT,
+            AsyncKOBOLDAI,
+            AsyncPhindSearch,
+            AsyncLLAMA2,
+            AsyncBLACKBOXAI,
+            AsyncGPT4FREE,
+        ] = None
+        self.provider_name: str = None
+        self.is_conversation = is_conversation
+        self.max_tokens = max_tokens
+        self.timeout = timeout
+        self.intro = intro
+        self.filepath = filepath
+        self.update_file = update_file
+        self.proxies = proxies
+        self.history_offset = history_offset
+        self.act = act
+        self.exclude = exclude
+
+    @property
+    def last_response(self) -> dict[str, Any]:
+        return self.provider.last_response
+
+    @property
+    def conversation(self) -> object:
+        return self.provider.conversation
+
+    async def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        run_new_test: bool = False,
+    ) -> dict | AsyncGenerator:
+        """Chat with AI asynchronously.
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+            run_new_test (bool, optional): Perform new test on g4f-based providers. Defaults to False.
+        Returns:
+            dict|AsyncGenerator : ai response.
+        """
+        ask_kwargs: dict[str, Union[str, bool]] = {
+            "prompt": prompt,
+            "stream": stream,
+            "raw": raw,
+            "optimizer": optimizer,
+            "conversationally": conversationally,
+        }
+
+        # tgpt-based providers
+        for provider_name, provider_obj in async_provider_map.items():
+            if provider_name in self.exclude:
+                continue
+            try:
+                self.provider_name = f"tgpt-{provider_name}"
+                self.provider = provider_obj(
+                    is_conversation=self.is_conversation,
+                    max_tokens=self.max_tokens,
+                    timeout=self.timeout,
+                    intro=self.intro,
+                    filepath=self.filepath,
+                    update_file=self.update_file,
+                    proxies=self.proxies,
+                    history_offset=self.history_offset,
+                    act=self.act,
+                )
+
+                async def for_stream():
+                    async_ask = await self.provider.ask(**ask_kwargs)
+                    async for chunk in async_ask:
+                        yield chunk
+
+                async def for_non_stream():
+                    return await self.provider.ask(**ask_kwargs)
+
+                return for_stream() if stream else await for_non_stream()
+
+            except Exception as e:
+                logging.debug(
+                    f"Failed to generate response using provider {provider_name} - {e}"
+                )
+
+        # g4f-based providers
+
+        for provider_info in TestProviders(timeout=self.timeout).get_results(
+            run=run_new_test
+        ):
+            if provider_info["name"] in self.exclude:
+                continue
+            try:
+                self.provider_name = f"g4f-{provider_info['name']}"
+                self.provider = AsyncGPT4FREE(
+                    provider=provider_info["name"],
+                    is_conversation=self.is_conversation,
+                    max_tokens=self.max_tokens,
+                    intro=self.intro,
+                    filepath=self.filepath,
+                    update_file=self.update_file,
+                    proxies=self.proxies,
+                    history_offset=self.history_offset,
+                    act=self.act,
+                )
+
+                async def for_stream():
+                    async_ask = await self.provider.ask(**ask_kwargs)
+                    async for chunk in async_ask:
+                        yield chunk
+
+                async def for_non_stream():
+                    return await self.provider.ask(**ask_kwargs)
+
+                return for_stream() if stream else await for_non_stream()
+
+            except Exception as e:
+                logging.debug(
+                    f"Failed to generate response using GPT4FREE-based provider {provider_info['name']} - {e}"
+                )
+
+        raise AllProvidersFailure(
+            "None of the providers generated response successfully."
+        )
+
+    async def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        run_new_test: bool = False,
+    ) -> str | AsyncGenerator:
+        """Generate response `str` asynchronously.
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+            run_new_test (bool, optional): Perform new test on g4f-based providers. Defaults to False.
+        Returns:
+            str|AsyncGenerator: Response generated
+        """
+
+        async def for_stream():
+            async_ask = await self.ask(
+                prompt,
+                True,
+                optimizer=optimizer,
+                conversationally=conversationally,
+                run_new_test=run_new_test,
+            )
+            async for response in async_ask:
+                yield await self.get_message(response)
+
+        async def for_non_stream():
+            ask_response = await self.ask(
+                prompt,
+                False,
+                optimizer=optimizer,
+                conversationally=conversationally,
+                run_new_test=run_new_test,
+            )
+            return await self.get_message(ask_response)
+
+        return for_stream() if stream else await for_non_stream()
+
+    async def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert self.provider is not None, "Chat with AI first"
+        return await self.provider.get_message(response)
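
Note: a minimal usage sketch for the new AUTO provider (not part of the package). AUTO walks `provider_map`, then the tested g4f providers, and returns the first successful answer; `provider_name` records which backend served it:

    from webscout.AIauto import AUTO

    bot = AUTO(timeout=30, exclude=["llama2"])  # providers can be skipped by name
    print(bot.chat("What is a wheel file?"))    # tries backends until one responds
    print(bot.provider_name)                    # e.g. "tgpt-PhindSearch" or "g4f-..."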
webscout/AIutel.py CHANGED
@@ -41,7 +41,9 @@ webai = [
     "yepchat",
     "you",
     "xjai",
-    "thinkany"
+    "thinkany",
+    "auto",
+
 ]
 
 gpt4free_providers = [
webscout/__init__.py CHANGED
@@ -11,7 +11,6 @@ from .DWEBS import DeepWEBS
 from .transcriber import transcriber
 from .voice import play_audio
 
-
 __repo__ = "https://github.com/OE-LUCIFER/Webscout"
 
@@ -30,7 +29,9 @@ webai = [
     "yepchat",
     "you",
     "xjai",
-    "thinkany"
+    "thinkany",
+    "auto",
+
 ]
 
 gpt4free_providers = [
webscout/async_providers.py CHANGED
@@ -7,7 +7,7 @@ from webscout.AI import AsyncLEO
 from webscout.AI import AsyncKOBOLDAI
 from webscout.AI import AsyncGROQ
 from webscout.AI import AsyncBLACKBOXAI
-from webscout.AI import AsyncGPT4FREE
+from webscout.g4f import AsyncGPT4FREE
 
 mapper: dict[str, object] = {
     "phind": AsyncPhindSearch,
@@ -21,13 +21,3 @@ mapper: dict[str, object] = {
     "groq": AsyncGROQ,
     "openai": AsyncOPENAI,
 }
-
-tgpt_mapper: dict[str, object] = {
-    "phind": AsyncPhindSearch,
-    "opengpt": AsyncOPENGPT,
-    "koboldai": AsyncKOBOLDAI,
-    # "gpt4free": AsyncGPT4FREE,
-    "blackboxai": AsyncBLACKBOXAI,
-    "llama2": AsyncLLAMA2,
-    "yepchat": AsyncYEPCHAT,
-}
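
Note: this `mapper` is what AIauto.py imports as `async_provider_map` (the removed `tgpt_mapper` duplicated it and is now gone). A short sketch of resolving a provider from it, assuming the async classes construct with defaults:

    import asyncio
    from webscout.async_providers import mapper

    async def main() -> None:
        bot = mapper["phind"]()        # AsyncPhindSearch, per the mapping above
        print(await bot.chat("Hi"))    # AsyncProvider exposes an awaitable chat()

    asyncio.run(main())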
webscout/exceptions.py CHANGED
@@ -10,4 +10,9 @@ class TimeoutE(Exception):
     """Raised for timeout errors during API requests."""
 
 class FailedToGenerateResponseError(Exception):
+
     """Provider failed to fetch response"""
+class AllProvidersFailure(Exception):
+    """None of the providers generated response successfully"""
+
+    pass
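
Note: `AllProvidersFailure` is raised by `AUTO.ask` and `AsyncAUTO.ask` (see AIauto.py above) once every backend has been tried. A hedged example of handling it:

    from webscout.AIauto import AUTO
    from webscout.exceptions import AllProvidersFailure

    try:
        reply = AUTO().chat("Hello")
    except AllProvidersFailure as exc:
        print(f"every provider failed: {exc}")  # message set at the raise site above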
webscout/webai.py CHANGED
@@ -456,7 +456,20 @@ class Main(cmd.Cmd):
                 history_offset=history_offset,
                 act=awesome_prompt,
             )
+        if provider == "auto":
+            from webscout.AIauto import AUTO
 
+            self.bot = AUTO(
+                is_conversation=disable_conversation,
+                max_tokens=max_tokens,
+                timeout=timeout,
+                intro=intro,
+                filepath=filepath,
+                update_file=update_file,
+                proxies=proxies,
+                history_offset=history_offset,
+                act=awesome_prompt,
+            )
         elif provider == "opengpt":
             from webscout.AI import OPENGPT
 
@@ -470,6 +483,7 @@ class Main(cmd.Cmd):
                 proxies=proxies,
                 history_offset=history_offset,
                 act=awesome_prompt,
+                assistant_id="bca37014-6f97-4f2b-8928-81ea8d478d88"
             )
         elif provider == "thinkany":
             from webscout.AI import ThinkAnyAI
@@ -725,7 +739,13 @@ class Main(cmd.Cmd):
         self.__init_time = time.time()
         self.__start_time = time.time()
         self.__end_time = time.time()
-
+
+    @property
+    def get_provider(self):
+        if self.provider == "auto" and self.bot.provider_name is not None:
+            return self.bot.provider_name
+        else:
+            return self.provider
 @property
     def prompt(self):
         current_time = datetime.datetime.now().strftime("%H:%M:%S")
@@ -740,7 +760,7 @@ class Main(cmd.Cmd):
         if not self.disable_coloring:
             cmd_prompt = (
                 f"╭─[`{Fore.GREEN}{getpass.getuser().capitalize()}@webai]`"
-                f"(`{Fore.YELLOW}{self.provider})`"
+                f"(`{Fore.YELLOW}{self.get_provider})`"
                 f"~[`{Fore.LIGHTWHITE_EX}⏰{Fore.MAGENTA}{current_time}-`"
                 f"{Fore.LIGHTWHITE_EX}💻{Fore.BLUE}{find_range(self.__init_time, time.time(), True)}-`"
                 f"{Fore.LIGHTWHITE_EX}⚡️{Fore.RED}{find_range(self.__start_time, self.__end_time)}s]`"
@@ -753,7 +773,7 @@ class Main(cmd.Cmd):
 
         else:
             return (
-                f"╭─[{getpass.getuser().capitalize()}@webscout]({self.provider})"
+                f"╭─[{getpass.getuser().capitalize()}@webscout]({self.get_provider})"
                 f"~[⏰{current_time}"
                 f"-💻{find_range(self.__init_time, time.time(), True)}"
                 f"-⚡️{find_range(self.__start_time, self.__end_time)}s]"
@@ -1125,7 +1145,7 @@ class Main(cmd.Cmd):
                 busy_bar.stop_spinning()
                 this.stream_output(
                     generated_response,
-                    title="AI Response",
+                    title="Webscout",
                     is_markdown=self.prettify,
                     style=Style(
                         color=self.color,
webscout/webscout_search.py CHANGED
@@ -4,12 +4,6 @@ from threading import Thread
 import sys
 from types import TracebackType
 from typing import Any, Awaitable, Dict, Optional, Type, Union
-if sys.platform == 'win32':
-    try:
-        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
-    except AttributeError:
-        # If WindowsSelectorEventLoopPolicy is not available, do nothing
-        pass
 from .webscout_search_async import AsyncWEBS
 
 
@@ -24,7 +18,7 @@ class WEBS(AsyncWEBS):
         proxies: Union[Dict[str, str], str, None] = None,  # deprecated
         timeout: Optional[int] = 10,
     ) -> None:
-        """Initialize the DDGS object.
+        """Initialize the WEBS object.
 
         Args:
             headers (dict, optional): Dictionary of headers for the HTTP client. Defaults to None.
@@ -81,4 +75,4 @@ class WEBS(AsyncWEBS):
         return self._run_async_in_thread(super().maps(*args, **kwargs))
 
     def translate(self, *args: Any, **kwargs: Any) -> Any:
-        return self._run_async_in_thread(super().translate(*args, **kwargs))
+        return self._run_async_in_thread(super().translate(*args, **kwargs))
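
Note: `WEBS` remains a synchronous facade over `AsyncWEBS` that runs each coroutine in a helper thread; this release only stops overriding the global Windows event-loop policy at import time. A usage sketch (keyword names assumed from the DDGS-style API):

    from webscout import WEBS

    with WEBS(timeout=10) as webs:
        print(webs.translate("bonjour", to="en"))  # async translate, run in a thread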
webscout-1.4.5.dist-info/METADATA → webscout-1.4.6.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 1.4.5
+Version: 1.4.6
 Summary: Search for anything using the Google, DuckDuckGo, phind.com. Also containes AI models, can transcribe yt videos, temporary email and phone number generation, have TTS support and webai(terminal gpt and open interpeter)
 Author: OEvortex
 Author-email: helpingai5@gmail.com
@@ -24,14 +24,14 @@ Classifier: Topic :: Internet :: WWW/HTTP :: Indexing/Search
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Description-Content-Type: text/markdown
 License-File: LICENSE.md
-Requires-Dist: docstring-inheritance >=2.1.2
-Requires-Dist: click >=8.1.7
-Requires-Dist: curl-cffi >=0.6.0b7
-Requires-Dist: lxml >=5.1.0
-Requires-Dist: nest-asyncio >=1.6.0
-Requires-Dist: selenium >=4.1.3
-Requires-Dist: tqdm >=4.64.0
-Requires-Dist: webdriver-manager >=3.5.4
+Requires-Dist: docstring-inheritance
+Requires-Dist: click
+Requires-Dist: curl-cffi
+Requires-Dist: lxml
+Requires-Dist: nest-asyncio
+Requires-Dist: selenium
+Requires-Dist: tqdm
+Requires-Dist: webdriver-manager
 Requires-Dist: halo >=0.0.31
 Requires-Dist: g4f >=0.2.2.3
 Requires-Dist: rich
webscout-1.4.5.dist-info/RECORD → webscout-1.4.6.dist-info/RECORD CHANGED
@@ -10,16 +10,17 @@ DeepWEBS/networks/webpage_fetcher.py,sha256=vRB9T3o-nMgrMkG2NPHTDctNeXaPSKCmBXqu
 DeepWEBS/utilsdw/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 DeepWEBS/utilsdw/enver.py,sha256=vpI7s4_o_VL9govSryOv-z1zYK3pTEW3-H9QNN8JYtc,2472
 DeepWEBS/utilsdw/logger.py,sha256=Z0nFUcEGyU8r28yKiIyvEtO26xxpmJgbvNToTfwZecc,8174
-webscout/AI.py,sha256=sQGpHRwikBBXcxWsB-nRCSBG3oYhSVBHUFr3C2dHrv8,226145
+webscout/AI.py,sha256=Iw19aegKqEUD87r0LDMK0qXQ0uCv9qKVSH6ZYJyCJYI,226181
+webscout/AIauto.py,sha256=NlIx-Nfuq-xJ3uZUOUJUXtZ2tzwcbx1ViIlnVK2aCrw,17297
 webscout/AIbase.py,sha256=GoHbN8r0gq2saYRZv6LA-Fr9Jlcjv80STKFXUq2ZeGU,4710
-webscout/AIutel.py,sha256=WJXAUaNK4IQ-txweZhm3scE11b-pK_tlIjS5VWJN8_E,33217
+webscout/AIutel.py,sha256=Kan_bxd3KqNKZbbOQq_4MVg7fG_Caz-WgYOliUQClU0,33233
 webscout/DWEBS.py,sha256=QT-7-dUgWhQ_H7EVZD53AVyXxyskoPMKCkFIpzkN56Q,7332
 webscout/LLM.py,sha256=CiDz0okZNEoXuxMwadZnwRGSLpqk2zg0vzvXSxQZjcE,1910
-webscout/__init__.py,sha256=WcRrw6-MIpt_lHtA498MaO5oWuMRkEk5qYH0mVt4_Nc,1090
+webscout/__init__.py,sha256=k67obDLdOiBMKlrqId7y9DuwI_vxLS__3J6DdZaOJ5k,1104
 webscout/__main__.py,sha256=ZtTRgsRjUi2JOvYFLF1ZCh55Sdoz94I-BS-TlJC7WDU,126
-webscout/async_providers.py,sha256=pPoSdfB_4SlOYcpAtkKIyDtl7sZ9DGgWy5aIBOjBO9Q,971
+webscout/async_providers.py,sha256=KGWKAhdEh4nMntLtyCaO0p827Tcg__gBLT_MLRO4l5o,711
 webscout/cli.py,sha256=F888fdrFUQgczMBN4yMOSf6Nh-IbvkqpPhDsbnA2FtQ,17059
-webscout/exceptions.py,sha256=e4hJnOEAiYuA6BTsMgv4R-vOq0Tt3f9ba0ROTNtPDl4,378
+webscout/exceptions.py,sha256=Wx8bEN3bz1nNZ9PAZHX8jwvFPddF9Y2pHAEwCMu_VJc,498
 webscout/g4f.py,sha256=F7POjR03ek7eZvcTX-p7gMe1b0nLNoIqF-L_vZwos0c,24489
 webscout/models.py,sha256=5iQIdtedT18YuTZ3npoG7kLMwcrKwhQ7928dl_7qZW0,692
 webscout/tempid.py,sha256=5oc3UbXhPGKxrMRTfRABT-V-dNzH_hOKWtLYM6iCWd4,5896
@@ -27,12 +28,12 @@ webscout/transcriber.py,sha256=EddvTSq7dPJ42V3pQVnGuEiYQ7WjJ9uyeR9kMSxN7uY,20622
 webscout/utils.py,sha256=CxeXvp0rWIulUrEaPZMaNfg_tSuQLRSV8uuHA2chyKE,2603
 webscout/version.py,sha256=2IusSRAul_UY0-wnbdAHj0XD7AIfWOrO2BBUSV-Sep0,25
 webscout/voice.py,sha256=0QjXTHAQmCK07IDZXRc7JXem47cnPJH7u3X0sVP1-UQ,967
-webscout/webai.py,sha256=Ijnkr8b0mO2I8-mdCZggoIZ5cqMfxaVFpKpeJf7xtTw,82831
-webscout/webscout_search.py,sha256=bBod97PffHDhS1AiyzMJJN74PXWK3A5OFCcEoHYWtcw,3393
+webscout/webai.py,sha256=_bwgdEG14-zkiLmToaz7rD6iUJ3mALRM_-BFpBs263Y,83660
+webscout/webscout_search.py,sha256=TvbrRYVMXbFGgEh0CoFHNYVY3iQ8SmejxEmv8Csu4IA,3159
 webscout/webscout_search_async.py,sha256=4_L_t_I9WlvpPEI3FI0K3v6Aayr0pNvD3chYOp7JR8o,42902
-webscout-1.4.5.dist-info/LICENSE.md,sha256=mRVwJuT4SXC5O93BFdsfWBjlXjGn2Np90Zm5SocUzM0,3150
-webscout-1.4.5.dist-info/METADATA,sha256=rLuICqX1CjT-NAz4IxoEvn2k78rmNyCLL-ecsHWr3wU,43430
-webscout-1.4.5.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
-webscout-1.4.5.dist-info/entry_points.txt,sha256=8-93eRslYrzTHs5E-6yFRJrve00C9q-SkXJD113jzRY,197
-webscout-1.4.5.dist-info/top_level.txt,sha256=OD5YKy6Y3hldL7SmuxsiEDxAG4LgdSSWwzYk22MF9fk,18
-webscout-1.4.5.dist-info/RECORD,,
+webscout-1.4.6.dist-info/LICENSE.md,sha256=mRVwJuT4SXC5O93BFdsfWBjlXjGn2Np90Zm5SocUzM0,3150
+webscout-1.4.6.dist-info/METADATA,sha256=7-I6QU3jBTJ6ZrIB_vpBbYh39nSWHhI-PrVDAMHTLwM,43363
+webscout-1.4.6.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+webscout-1.4.6.dist-info/entry_points.txt,sha256=8-93eRslYrzTHs5E-6yFRJrve00C9q-SkXJD113jzRY,197
+webscout-1.4.6.dist-info/top_level.txt,sha256=OD5YKy6Y3hldL7SmuxsiEDxAG4LgdSSWwzYk22MF9fk,18
+webscout-1.4.6.dist-info/RECORD,,