webscout 5.1__py3-none-any.whl → 5.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout has been flagged as potentially problematic; consult the registry's advisory page for the flagged release for more details.

webscout/AIauto.py CHANGED
@@ -1,36 +1,45 @@
1
1
  from webscout.AIbase import Provider, AsyncProvider
2
2
  from webscout.Provider.ThinkAnyAI import ThinkAnyAI
3
- from webscout.Provider.Xjai import Xjai
4
3
  from webscout.Provider.Llama import LLAMA
5
- from webscout.Provider.Leo import LEO
6
- from webscout.Provider.Leo import AsyncLEO
4
+
7
5
  from webscout.Provider.Koboldai import KOBOLDAI
8
6
  from webscout.Provider.Koboldai import AsyncKOBOLDAI
9
- from webscout.Provider.OpenGPT import OPENGPT
10
- from webscout.Provider.OpenGPT import OPENGPTv2
11
- from webscout.Provider.OpenGPT import AsyncOPENGPT
12
- from webscout.Provider.Perplexity import PERPLEXITY
7
+
8
+ from webscout.Provider.Perplexity import Perplexity
13
9
  from webscout.Provider.Blackboxai import BLACKBOXAI
14
10
  from webscout.Provider.Blackboxai import AsyncBLACKBOXAI
15
11
  from webscout.Provider.Phind import PhindSearch
16
12
  from webscout.Provider.Phind import AsyncPhindSearch
17
13
  from webscout.Provider.Phind import Phindv2
18
14
  from webscout.Provider.Phind import AsyncPhindv2
19
- from webscout.Provider.Yepchat import YEPCHAT
20
- from webscout.Provider.Yepchat import AsyncYEPCHAT
15
+ from webscout.Provider.yep import YEPCHAT
21
16
  from webscout.Provider.Berlin4h import Berlin4h
22
- from webscout.Provider.ChatGPTUK import ChatGPTUK
23
17
  from webscout.Provider.Poe import POE
24
18
  from webscout.Provider.BasedGPT import BasedGPT
25
19
  from webscout.Provider.Deepseek import DeepSeek
26
20
  from webscout.Provider.Deepinfra import DeepInfra, VLM, AsyncDeepInfra
27
- from webscout.Provider.VTLchat import VTLchat
28
- from webscout.Provider.Geminipro import GEMINIPRO
29
- from webscout.Provider.Geminiflash import GEMINIFLASH
21
+ from webscout.Provider.OLLAMA import OLLAMA
22
+ from webscout.Provider.Andi import AndiSearch
23
+ from webscout.Provider.Llama3 import LLAMA3
24
+ from webscout.Provider.DARKAI import DARKAI
25
+ from webscout.Provider.koala import KOALA
26
+ from webscout.Provider.RUBIKSAI import RUBIKSAI
27
+ from webscout.Provider.meta import Meta
28
+ from webscout.Provider.liaobots import LiaoBots
29
+ from webscout.Provider.DiscordRocks import DiscordRocks
30
+ from webscout.Provider.felo_search import Felo
31
+ from webscout.Provider.xdash import XDASH
32
+ from webscout.Provider.julius import Julius
33
+ from webscout.Provider.Youchat import YouChat
34
+ from webscout.Provider.Cloudflare import Cloudflare
35
+ from webscout.Provider.turboseek import TurboSeek
36
+ from webscout.Provider.NetFly import NetFly
37
+ from webscout.Provider.EDITEE import Editee
38
+ # from webscout.Provider.Chatify import Chatify # TODO: UNFINISHED
39
+ from webscout.Provider.PI import PiAI
30
40
  from webscout.g4f import GPT4FREE, AsyncGPT4FREE
31
41
  from webscout.g4f import TestProviders
32
42
  from webscout.exceptions import AllProvidersFailure
33
- from webscout.async_providers import mapper as async_provider_map
34
43
  from typing import AsyncGenerator
35
44
 
36
45
  from typing import Union
@@ -42,53 +51,75 @@ provider_map: dict[
42
51
  str,
43
52
  Union[
44
53
  ThinkAnyAI,
45
- Xjai,
46
54
  LLAMA,
47
- LEO,
48
55
  KOBOLDAI,
49
- OPENGPT,
50
- OPENGPTv2,
51
- PERPLEXITY,
56
+ Perplexity,
52
57
  BLACKBOXAI,
53
58
  PhindSearch,
54
59
  Phindv2,
55
60
  YEPCHAT,
56
61
  Berlin4h,
57
- ChatGPTUK,
58
62
  POE,
59
63
  BasedGPT,
60
64
  DeepSeek,
61
65
  DeepInfra,
62
66
  VLM,
63
- VTLchat,
64
- GEMINIPRO,
65
- GEMINIFLASH,
66
67
  GPT4FREE,
68
+ OLLAMA,
69
+ AndiSearch,
70
+ LLAMA3,
71
+ DARKAI,
72
+ KOALA,
73
+ RUBIKSAI,
74
+ Meta,
75
+ LiaoBots,
76
+ DiscordRocks,
77
+ Felo,
78
+ XDASH,
79
+ Julius,
80
+ YouChat,
81
+ Cloudflare,
82
+ TurboSeek,
83
+ NetFly,
84
+ Editee,
85
+ # Chatify,
86
+ PiAI,
67
87
  ],
68
88
  ] = {
69
89
  "ThinkAnyAI": ThinkAnyAI,
70
- "Xjai": Xjai,
71
90
  "LLAMA2": LLAMA,
72
- "LEO": LEO,
73
91
  "KOBOLDAI": KOBOLDAI,
74
- "OPENGPT": OPENGPT,
75
- "OPENGPTv2": OPENGPTv2,
76
- "PERPLEXITY": PERPLEXITY,
92
+ "PERPLEXITY": Perplexity,
77
93
  "BLACKBOXAI": BLACKBOXAI,
78
94
  "PhindSearch": PhindSearch,
79
95
  "Phindv2": Phindv2,
80
96
  "YEPCHAT": YEPCHAT,
81
97
  "Berlin4h": Berlin4h,
82
- "ChatGPTUK": ChatGPTUK,
83
98
  "POE": POE,
84
99
  "BasedGPT": BasedGPT,
85
100
  "DeepSeek": DeepSeek,
86
101
  "DeepInfra": DeepInfra,
87
102
  "VLM": VLM,
88
- "VTLchat": VTLchat,
89
- "GEMINIPRO": GEMINIPRO,
90
- "GEMINIFLASH": GEMINIFLASH,
91
103
  "gpt4free": GPT4FREE,
104
+ "ollama": OLLAMA,
105
+ "andi": AndiSearch,
106
+ "llama3": LLAMA3,
107
+ "darkai": DARKAI,
108
+ "koala": KOALA,
109
+ "rubiksai": RUBIKSAI,
110
+ "meta": Meta,
111
+ "liaobots": LiaoBots,
112
+ "discordrocks": DiscordRocks,
113
+ "felo": Felo,
114
+ "xdash": XDASH,
115
+ "julius": Julius,
116
+ "you": YouChat,
117
+ "cloudflare": Cloudflare,
118
+ "turboseek": TurboSeek,
119
+ "netfly": NetFly,
120
+ "editee": Editee,
121
+ # "chatify": Chatify,
122
+ "pi": PiAI,
92
123
  }
93
124
 
94
125
 
@@ -122,28 +153,39 @@ class AUTO(Provider):
122
153
  """
123
154
  self.provider: Union[
124
155
  ThinkAnyAI,
125
- Xjai,
126
156
  LLAMA,
127
- LEO,
128
157
  KOBOLDAI,
129
- OPENGPT,
130
- OPENGPTv2,
131
- PERPLEXITY,
158
+ Perplexity,
132
159
  BLACKBOXAI,
133
160
  PhindSearch,
134
161
  Phindv2,
135
162
  YEPCHAT,
136
163
  Berlin4h,
137
- ChatGPTUK,
138
164
  POE,
139
165
  BasedGPT,
140
166
  DeepSeek,
141
167
  DeepInfra,
142
168
  VLM,
143
- VTLchat,
144
- GEMINIPRO,
145
- GEMINIFLASH,
146
169
  GPT4FREE,
170
+ OLLAMA,
171
+ AndiSearch,
172
+ LLAMA3,
173
+ DARKAI,
174
+ KOALA,
175
+ RUBIKSAI,
176
+ Meta,
177
+ LiaoBots,
178
+ DiscordRocks,
179
+ Felo,
180
+ XDASH,
181
+ Julius,
182
+ YouChat,
183
+ Cloudflare,
184
+ TurboSeek,
185
+ NetFly,
186
+ Editee,
187
+ # Chatify,
188
+ PiAI,
147
189
  ] = None
148
190
  self.provider_name: str = None
149
191
  self.is_conversation = is_conversation
@@ -318,239 +360,3 @@ class AUTO(Provider):
318
360
  """
319
361
  assert self.provider is not None, "Chat with AI first"
320
362
  return self.provider.get_message(response)
321
-
322
-
323
- class AsyncAUTO(AsyncProvider):
324
- def __init__(
325
- self,
326
- is_conversation: bool = True,
327
- max_tokens: int = 600,
328
- timeout: int = 30,
329
- intro: str = None,
330
- filepath: str = None,
331
- update_file: bool = True,
332
- proxies: dict = {},
333
- history_offset: int = 10250,
334
- act: str = None,
335
- exclude: list[str] = [],
336
- ):
337
- """Instantiates AsyncAUTO
338
-
339
- Args:
340
- is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True
341
- max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
342
- timeout (int, optional): Http request timeout. Defaults to 30.
343
- intro (str, optional): Conversation introductory prompt. Defaults to None.
344
- filepath (str, optional): Path to file containing conversation history. Defaults to None.
345
- update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
346
- proxies (dict, optional): Http request proxies. Defaults to {}.
347
- history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
348
- act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
349
- exclude(list[str], optional): List of providers to be excluded. Defaults to [].
350
- """
351
- self.provider: Union[
352
- AsyncOPENGPT,
353
- AsyncKOBOLDAI,
354
- AsyncPhindSearch,
355
- AsyncBLACKBOXAI,
356
- AsyncGPT4FREE,
357
- AsyncLEO,
358
- ThinkAnyAI,
359
- Xjai,
360
- LLAMA,
361
- LEO,
362
- KOBOLDAI,
363
- OPENGPT,
364
- OPENGPTv2,
365
- PERPLEXITY,
366
- BLACKBOXAI,
367
- PhindSearch,
368
- Phindv2,
369
- YEPCHAT,
370
- Berlin4h,
371
- ChatGPTUK,
372
- POE,
373
- BasedGPT,
374
- DeepSeek,
375
- DeepInfra,
376
- VLM,
377
- VTLchat,
378
- GEMINIPRO,
379
- GEMINIFLASH,
380
- GPT4FREE
381
- ] = None
382
- self.provider_name: str = None
383
- self.is_conversation = is_conversation
384
- self.max_tokens = max_tokens
385
- self.timeout = timeout
386
- self.intro = intro
387
- self.filepath = filepath
388
- self.update_file = update_file
389
- self.proxies = proxies
390
- self.history_offset = history_offset
391
- self.act = act
392
- self.exclude = exclude
393
-
394
- @property
395
- def last_response(self) -> dict[str, Any]:
396
- return self.provider.last_response
397
-
398
- @property
399
- def conversation(self) -> object:
400
- return self.provider.conversation
401
-
402
- async def ask(
403
- self,
404
- prompt: str,
405
- stream: bool = False,
406
- raw: bool = False,
407
- optimizer: str = None,
408
- conversationally: bool = False,
409
- run_new_test: bool = False,
410
- ) -> dict | AsyncGenerator:
411
- """Chat with AI asynchronously.
412
-
413
- Args:
414
- prompt (str): Prompt to be send.
415
- stream (bool, optional): Flag for streaming response. Defaults to False.
416
- raw (bool, optional): Stream back raw response as received. Defaults to False.
417
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
418
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
419
- run_new_test (bool, optional): Perform new test on g4f-based providers. Defaults to False.
420
- Returns:
421
- dict|AsyncGenerator : ai response.
422
- """
423
- ask_kwargs: dict[str, Union[str, bool]] = {
424
- "prompt": prompt,
425
- "stream": stream,
426
- "raw": raw,
427
- "optimizer": optimizer,
428
- "conversationally": conversationally,
429
- }
430
-
431
- # tgpt-based providers
432
- for provider_name, provider_obj in async_provider_map.items():
433
- if provider_name in self.exclude:
434
- continue
435
- try:
436
- self.provider_name = f"tgpt-{provider_name}"
437
- self.provider = provider_obj(
438
- is_conversation=self.is_conversation,
439
- max_tokens=self.max_tokens,
440
- timeout=self.timeout,
441
- intro=self.intro,
442
- filepath=self.filepath,
443
- update_file=self.update_file,
444
- proxies=self.proxies,
445
- history_offset=self.history_offset,
446
- act=self.act,
447
- )
448
-
449
- async def for_stream():
450
- async_ask = await self.provider.ask(**ask_kwargs)
451
- async for chunk in async_ask:
452
- yield chunk
453
-
454
- async def for_non_stream():
455
- return await self.provider.ask(**ask_kwargs)
456
-
457
- return for_stream() if stream else await for_non_stream()
458
-
459
- except Exception as e:
460
- logging.debug(
461
- f"Failed to generate response using provider {provider_name} - {e}"
462
- )
463
-
464
- # g4f-based providers
465
-
466
- for provider_info in TestProviders(timeout=self.timeout).get_results(
467
- run=run_new_test
468
- ):
469
- if provider_info["name"] in self.exclude:
470
- continue
471
- try:
472
- self.provider_name = f"g4f-{provider_info['name']}"
473
- self.provider = AsyncGPT4FREE(
474
- provider=provider_info["name"],
475
- is_conversation=self.is_conversation,
476
- max_tokens=self.max_tokens,
477
- intro=self.intro,
478
- filepath=self.filepath,
479
- update_file=self.update_file,
480
- proxies=self.proxies,
481
- history_offset=self.history_offset,
482
- act=self.act,
483
- )
484
-
485
- async def for_stream():
486
- async_ask = await self.provider.ask(**ask_kwargs)
487
- async for chunk in async_ask:
488
- yield chunk
489
-
490
- async def for_non_stream():
491
- return await self.provider.ask(**ask_kwargs)
492
-
493
- return for_stream() if stream else await for_non_stream()
494
-
495
- except Exception as e:
496
- logging.debug(
497
- f"Failed to generate response using GPT4FREE-base provider {provider_name} - {e}"
498
- )
499
-
500
- raise AllProvidersFailure(
501
- "None of the providers generated response successfully."
502
- )
503
-
504
- async def chat(
505
- self,
506
- prompt: str,
507
- stream: bool = False,
508
- optimizer: str = None,
509
- conversationally: bool = False,
510
- run_new_test: bool = False,
511
- ) -> str | AsyncGenerator:
512
- """Generate response `str` asynchronously.
513
- Args:
514
- prompt (str): Prompt to be send.
515
- stream (bool, optional): Flag for streaming response. Defaults to False.
516
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
517
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
518
- run_new_test (bool, optional): Perform new test on g4f-based providers. Defaults to False.
519
- Returns:
520
- str|AsyncGenerator: Response generated
521
- """
522
-
523
- async def for_stream():
524
- async_ask = await self.ask(
525
- prompt,
526
- True,
527
- optimizer=optimizer,
528
- conversationally=conversationally,
529
- run_new_test=run_new_test,
530
- )
531
- async for response in async_ask:
532
- yield await self.get_message(response)
533
-
534
- async def for_non_stream():
535
- ask_response = await self.ask(
536
- prompt,
537
- False,
538
- optimizer=optimizer,
539
- conversationally=conversationally,
540
- run_new_test=run_new_test,
541
- )
542
- return await self.get_message(ask_response)
543
-
544
- return for_stream() if stream else await for_non_stream()
545
-
546
- async def get_message(self, response: dict) -> str:
547
- """Retrieves message only from response
548
-
549
- Args:
550
- response (dict): Response generated by `self.ask`
551
-
552
- Returns:
553
- str: Message extracted
554
- """
555
- assert self.provider is not None, "Chat with AI first"
556
- return await self.provider.get_message(response)
webscout/AIbase.py CHANGED
@@ -1,9 +1,9 @@
1
1
  from abc import ABC
2
2
  from abc import abstractmethod
3
-
3
+ from typing import AsyncGenerator, List, Union, Generator
4
4
 
5
5
  class Provider(ABC):
6
- """Base class for providers"""
6
+ """Base class for text-based AI providers"""
7
7
 
8
8
  @abstractmethod
9
9
  def ask(
@@ -69,9 +69,8 @@ class Provider(ABC):
69
69
  """
70
70
  raise NotImplementedError("Method needs to be implemented in subclass")
71
71
 
72
-
73
72
  class AsyncProvider(ABC):
74
- """Asynchronous base class for providers"""
73
+ """Asynchronous base class for text-based AI providers"""
75
74
 
76
75
  @abstractmethod
77
76
  async def ask(
@@ -135,4 +134,107 @@ class AsyncProvider(ABC):
135
134
  Returns:
136
135
  str: Message extracted
137
136
  """
137
+ raise NotImplementedError("Method needs to be implemented in subclass")
138
+
139
+ class TTSProvider(ABC):
140
+ """Base class for text-to-speech providers"""
141
+
142
+ @abstractmethod
143
+ def tts(self, text: str) -> Union[bytes, str, Generator[bytes, None, None]]:
144
+ """
145
+ Converts text to speech.
146
+
147
+ Args:
148
+ text (str): The text to be converted to speech.
149
+
150
+ Returns:
151
+ Union[bytes, str, Generator[bytes, None, None]]:
152
+ - The raw audio bytes if the provider supports returning audio directly.
153
+ - The filename of the saved audio file if the provider saves the audio to a file.
154
+ - A generator yielding chunks of audio bytes if the provider supports streaming audio.
155
+ """
156
+ raise NotImplementedError("Method needs to be implemented in subclass")
157
+
158
+ class AsyncTTSProvider(ABC):
159
+ """Asynchronous base class for text-to-speech providers"""
160
+
161
+ @abstractmethod
162
+ async def tts(self, text: str) -> Union[bytes, str, AsyncGenerator[bytes, None]]:
163
+ """
164
+ Asynchronously converts text to speech.
165
+
166
+ Args:
167
+ text (str): The text to be converted to speech.
168
+
169
+ Returns:
170
+ Union[bytes, str, AsyncGenerator[bytes, None]]:
171
+ - The raw audio bytes if the provider supports returning audio directly.
172
+ - The filename of the saved audio file if the provider saves the audio to a file.
173
+ - An asynchronous generator yielding chunks of audio bytes if the provider supports streaming audio.
174
+ """
175
+ raise NotImplementedError("Method needs to be implemented in subclass")
176
+
177
+ class ImageProvider(ABC):
178
+ """Base class for text-to-image providers"""
179
+
180
+ @abstractmethod
181
+ def generate(self, prompt: str, amount: int = 1) -> List[bytes]:
182
+ """Generates images from a text prompt.
183
+
184
+ Args:
185
+ prompt (str): The text prompt describing the desired image.
186
+ amount (int, optional): The number of images to generate. Defaults to 1.
187
+
188
+ Returns:
189
+ List[bytes]: A list of generated images in bytes format.
190
+ """
191
+ raise NotImplementedError("Method needs to be implemented in subclass")
192
+
193
+ @abstractmethod
194
+ def save(self, response: List[bytes], name: str = None, dir: str = None) -> List[str]:
195
+ """Saves the generated images to files.
196
+
197
+ Args:
198
+ response (List[bytes]): The list of generated images in bytes format.
199
+ name (str, optional): The base filename for the images. Defaults to None.
200
+ dir (str, optional): The directory to save the images. Defaults to None.
201
+
202
+ Returns:
203
+ List[str]: A list of the saved filenames.
204
+ """
205
+ raise NotImplementedError("Method needs to be implemented in subclass")
206
+
207
+ class AsyncImageProvider(ABC):
208
+ """Base class for asynchronous text-to-image providers"""
209
+
210
+ @abstractmethod
211
+ async def generate(self, prompt: str, amount: int = 1) -> Union[AsyncGenerator[bytes, None], List[bytes]]:
212
+ """Asynchronously generates images from a text prompt.
213
+
214
+ Args:
215
+ prompt (str): The text prompt describing the desired image.
216
+ amount (int, optional): The number of images to generate. Defaults to 1.
217
+
218
+ Returns:
219
+ Union[AsyncGenerator[bytes, None], List[bytes]]:
220
+ - An asynchronous generator yielding image bytes.
221
+ - A list of image bytes if not streaming.
222
+ """
223
+ raise NotImplementedError("Method needs to be implemented in subclass")
224
+
225
+ @abstractmethod
226
+ async def save(self, response: Union[AsyncGenerator[bytes, None], List[bytes]],
227
+ name: str = None, dir: str = None) -> List[str]:
228
+ """Asynchronously saves the generated images to files.
229
+
230
+ Args:
231
+ response (Union[AsyncGenerator[bytes, None], List[bytes]]):
232
+ - The asynchronous generator yielding images in bytes format (if streaming).
233
+ - The list of generated images in bytes format (if not streaming).
234
+ name (str, optional): The base filename for the images. Defaults to None.
235
+ dir (str, optional): The directory to save the images. Defaults to None.
236
+
237
+ Returns:
238
+ List[str]: A list of the saved filenames.
239
+ """
138
240
  raise NotImplementedError("Method needs to be implemented in subclass")
webscout/AIutel.py CHANGED
@@ -173,6 +173,23 @@ class Optimizers:
173
173
  )
174
174
 
175
175
 
176
+ class Proxy:
177
+ def __init__(self, http_proxy=None, https_proxy=None):
178
+ self.set_proxies(http_proxy, https_proxy)
179
+
180
+ def set_proxies(self, http_proxy=None, https_proxy=None):
181
+ self.proxies = {
182
+ "http": http_proxy,
183
+ "https": https_proxy
184
+ }
185
+
186
+ def post(self, url, headers=None, **kwargs):
187
+ return requests.post(url, headers=headers, proxies=self.proxies, **kwargs)
188
+
189
+ def get(self, url, headers=None, **kwargs):
190
+ return requests.get(url, headers=headers, proxies=self.proxies, **kwargs)
191
+
192
+
176
193
  class Conversation:
177
194
  """Handles prompt generation based on history"""
178
195
 
@@ -287,6 +304,20 @@ class Conversation:
287
304
  with open(self.file, "a", encoding="utf-8") as fh: # Specify UTF-8 encoding
288
305
  fh.write(new_history)
289
306
  self.chat_history += new_history
307
+ else:
308
+ self.chat_history += new_history
309
+
310
+ def add_message(self, role: str, content: str) -> None:
311
+ """Appends a new message to the conversation history."""
312
+ if role == "user":
313
+ self.chat_history += f"\nUser : {content}"
314
+ elif role == "llm":
315
+ self.chat_history += f"\nLLM : {content}"
316
+ elif role == "tool":
317
+ self.chat_history += f"\nTool : {content}"
318
+ else:
319
+ logging.warning(f"Unknown role '{role}' for message: {content}")
320
+
290
321
 
291
322
 
292
323