webscout 1.3.6__py3-none-any.whl → 1.3.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

webscout/AIbase.py CHANGED
@@ -3,7 +3,7 @@ from abc import abstractmethod
3
3
 
4
4
 
5
5
  class Provider(ABC):
6
- """Base class for models class"""
6
+ """Base class for providers"""
7
7
 
8
8
  @abstractmethod
9
9
  def ask(
@@ -61,6 +61,74 @@ class Provider(ABC):
61
61
  def get_message(self, response: dict) -> str:
62
62
  """Retrieves message only from response
63
63
 
64
+ Args:
65
+ response (dict): Response generated by `self.ask`
66
+
67
+ Returns:
68
+ str: Message extracted
69
+ """
70
+ raise NotImplementedError("Method needs to be implemented in subclass")
71
+
72
+
73
+ class AsyncProvider(ABC):
74
+ """Asynchronous base class for providers"""
75
+
76
+ @abstractmethod
77
+ async def ask(
78
+ self,
79
+ prompt: str,
80
+ stream: bool = False,
81
+ raw: bool = False,
82
+ optimizer: str = None,
83
+ conversationally: bool = False,
84
+ ) -> dict:
85
+ """Asynchronously chat with AI
86
+
87
+ Args:
88
+ prompt (str): Prompt to be sent
89
+ stream (bool, optional): Flag for streaming response. Defaults to False.
90
+ raw (bool, optional): Stream back raw response as received
91
+ optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`
92
+ conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
93
+ Returns:
94
+ dict : {}
95
+ ```json
96
+ {
97
+ "completion": "\nNext: domestic cat breeds with short hair >>",
98
+ "stop_reason": null,
99
+ "truncated": false,
100
+ "stop": null,
101
+ "model": "llama-2-13b-chat",
102
+ "log_id": "cmpl-3kYiYxSNDvgMShSzFooz6t",
103
+ "exception": null
104
+ }
105
+ ```
106
+ """
107
+ raise NotImplementedError("Method needs to be implemented in subclass")
108
+
109
+ @abstractmethod
110
+ async def chat(
111
+ self,
112
+ prompt: str,
113
+ stream: bool = False,
114
+ optimizer: str = None,
115
+ conversationally: bool = False,
116
+ ) -> str:
117
+ """Asynchronously generate response `str`
118
+ Args:
119
+ prompt (str): Prompt to be sent
120
+ stream (bool, optional): Flag for streaming response. Defaults to False.
121
+ optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`
122
+ conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
123
+ Returns:
124
+ str: Response generated
125
+ """
126
+ raise NotImplementedError("Method needs to be implemented in subclass")
127
+
128
+ @abstractmethod
129
+ async def get_message(self, response: dict) -> str:
130
+ """Asynchronously retrieves message only from response
131
+
64
132
  Args:
65
133
  response (dict): Response generated by `self.ask`
66
134
 
webscout/AIutel.py CHANGED
@@ -11,6 +11,13 @@ import click
11
11
  from rich.markdown import Markdown
12
12
  from rich.console import Console
13
13
  import g4f
14
+ from typing import Union
15
+ from typing import NoReturn
16
+ import requests
17
+ from pathlib import Path
18
+ from playsound import playsound
19
+ from time import sleep as wait
20
+ import pathlib
14
21
  appdir = appdirs.AppDirs("AIWEBS", "vortex")
15
22
 
16
23
  default_path = appdir.user_cache_dir
@@ -27,10 +34,10 @@ webai = [
27
34
  "blackboxai",
28
35
  "g4fauto",
29
36
  "perplexity",
30
- "sean",
31
37
  "groq",
32
38
  "reka",
33
- "cohere"
39
+ "cohere",
40
+ "yepchat",
34
41
  ]
35
42
 
36
43
  gpt4free_providers = [
@@ -38,7 +45,24 @@ gpt4free_providers = [
38
45
  ]
39
46
 
40
47
  available_providers = webai + gpt4free_providers
48
+ def sanitize_stream(
49
+ chunk: str, intro_value: str = "data:", to_json: bool = True
50
+ ) -> str | dict:
51
+ """Remove streaming flags
41
52
 
53
+ Args:
54
+ chunk (str): Streaming chunk.
55
+ intro_value (str, optional): streaming flag. Defaults to "data:".
56
+ to_json (bool, optional): Return chunk as dictionary. Defaults to True.
57
+
58
+ Returns:
59
+ str: Sanitized streaming value.
60
+ """
61
+
62
+ if chunk.startswith(intro_value):
63
+ chunk = chunk[len(intro_value) :]
64
+
65
+ return json.loads(chunk) if to_json else chunk
42
66
  def run_system_command(
43
67
  command: str,
44
68
  exit_on_error: bool = True,
@@ -671,4 +695,294 @@ Current Datetime : {datetime.datetime.now()}
671
695
  f"{e.args[1] if len(e.args)>1 else str(e)}",
672
696
  "error",
673
697
  )
674
- return f"PREVIOUS SCRIPT EXCEPTION:\n{str(e)}"
698
+ return f"PREVIOUS SCRIPT EXCEPTION:\n{str(e)}"
699
+ class Audio:
700
+ # Request headers
701
+ headers: dict[str, str] = {
702
+ "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36"
703
+ }
704
+ cache_dir = pathlib.Path("./audio_cache")
705
+ all_voices: list[str] = [
706
+ "Filiz",
707
+ "Astrid",
708
+ "Tatyana",
709
+ "Maxim",
710
+ "Carmen",
711
+ "Ines",
712
+ "Cristiano",
713
+ "Vitoria",
714
+ "Ricardo",
715
+ "Maja",
716
+ "Jan",
717
+ "Jacek",
718
+ "Ewa",
719
+ "Ruben",
720
+ "Lotte",
721
+ "Liv",
722
+ "Seoyeon",
723
+ "Takumi",
724
+ "Mizuki",
725
+ "Giorgio",
726
+ "Carla",
727
+ "Bianca",
728
+ "Karl",
729
+ "Dora",
730
+ "Mathieu",
731
+ "Celine",
732
+ "Chantal",
733
+ "Penelope",
734
+ "Miguel",
735
+ "Mia",
736
+ "Enrique",
737
+ "Conchita",
738
+ "Geraint",
739
+ "Salli",
740
+ "Matthew",
741
+ "Kimberly",
742
+ "Kendra",
743
+ "Justin",
744
+ "Joey",
745
+ "Joanna",
746
+ "Ivy",
747
+ "Raveena",
748
+ "Aditi",
749
+ "Emma",
750
+ "Brian",
751
+ "Amy",
752
+ "Russell",
753
+ "Nicole",
754
+ "Vicki",
755
+ "Marlene",
756
+ "Hans",
757
+ "Naja",
758
+ "Mads",
759
+ "Gwyneth",
760
+ "Zhiyu",
761
+ "es-ES-Standard-A",
762
+ "it-IT-Standard-A",
763
+ "it-IT-Wavenet-A",
764
+ "ja-JP-Standard-A",
765
+ "ja-JP-Wavenet-A",
766
+ "ko-KR-Standard-A",
767
+ "ko-KR-Wavenet-A",
768
+ "pt-BR-Standard-A",
769
+ "tr-TR-Standard-A",
770
+ "sv-SE-Standard-A",
771
+ "nl-NL-Standard-A",
772
+ "nl-NL-Wavenet-A",
773
+ "en-US-Wavenet-A",
774
+ "en-US-Wavenet-B",
775
+ "en-US-Wavenet-C",
776
+ "en-US-Wavenet-D",
777
+ "en-US-Wavenet-E",
778
+ "en-US-Wavenet-F",
779
+ "en-GB-Standard-A",
780
+ "en-GB-Standard-B",
781
+ "en-GB-Standard-C",
782
+ "en-GB-Standard-D",
783
+ "en-GB-Wavenet-A",
784
+ "en-GB-Wavenet-B",
785
+ "en-GB-Wavenet-C",
786
+ "en-GB-Wavenet-D",
787
+ "en-US-Standard-B",
788
+ "en-US-Standard-C",
789
+ "en-US-Standard-D",
790
+ "en-US-Standard-E",
791
+ "de-DE-Standard-A",
792
+ "de-DE-Standard-B",
793
+ "de-DE-Wavenet-A",
794
+ "de-DE-Wavenet-B",
795
+ "de-DE-Wavenet-C",
796
+ "de-DE-Wavenet-D",
797
+ "en-AU-Standard-A",
798
+ "en-AU-Standard-B",
799
+ "en-AU-Wavenet-A",
800
+ "en-AU-Wavenet-B",
801
+ "en-AU-Wavenet-C",
802
+ "en-AU-Wavenet-D",
803
+ "en-AU-Standard-C",
804
+ "en-AU-Standard-D",
805
+ "fr-CA-Standard-A",
806
+ "fr-CA-Standard-B",
807
+ "fr-CA-Standard-C",
808
+ "fr-CA-Standard-D",
809
+ "fr-FR-Standard-C",
810
+ "fr-FR-Standard-D",
811
+ "fr-FR-Wavenet-A",
812
+ "fr-FR-Wavenet-B",
813
+ "fr-FR-Wavenet-C",
814
+ "fr-FR-Wavenet-D",
815
+ "da-DK-Wavenet-A",
816
+ "pl-PL-Wavenet-A",
817
+ "pl-PL-Wavenet-B",
818
+ "pl-PL-Wavenet-C",
819
+ "pl-PL-Wavenet-D",
820
+ "pt-PT-Wavenet-A",
821
+ "pt-PT-Wavenet-B",
822
+ "pt-PT-Wavenet-C",
823
+ "pt-PT-Wavenet-D",
824
+ "ru-RU-Wavenet-A",
825
+ "ru-RU-Wavenet-B",
826
+ "ru-RU-Wavenet-C",
827
+ "ru-RU-Wavenet-D",
828
+ "sk-SK-Wavenet-A",
829
+ "tr-TR-Wavenet-A",
830
+ "tr-TR-Wavenet-B",
831
+ "tr-TR-Wavenet-C",
832
+ "tr-TR-Wavenet-D",
833
+ "tr-TR-Wavenet-E",
834
+ "uk-UA-Wavenet-A",
835
+ "ar-XA-Wavenet-A",
836
+ "ar-XA-Wavenet-B",
837
+ "ar-XA-Wavenet-C",
838
+ "cs-CZ-Wavenet-A",
839
+ "nl-NL-Wavenet-B",
840
+ "nl-NL-Wavenet-C",
841
+ "nl-NL-Wavenet-D",
842
+ "nl-NL-Wavenet-E",
843
+ "en-IN-Wavenet-A",
844
+ "en-IN-Wavenet-B",
845
+ "en-IN-Wavenet-C",
846
+ "fil-PH-Wavenet-A",
847
+ "fi-FI-Wavenet-A",
848
+ "el-GR-Wavenet-A",
849
+ "hi-IN-Wavenet-A",
850
+ "hi-IN-Wavenet-B",
851
+ "hi-IN-Wavenet-C",
852
+ "hu-HU-Wavenet-A",
853
+ "id-ID-Wavenet-A",
854
+ "id-ID-Wavenet-B",
855
+ "id-ID-Wavenet-C",
856
+ "it-IT-Wavenet-B",
857
+ "it-IT-Wavenet-C",
858
+ "it-IT-Wavenet-D",
859
+ "ja-JP-Wavenet-B",
860
+ "ja-JP-Wavenet-C",
861
+ "ja-JP-Wavenet-D",
862
+ "cmn-CN-Wavenet-A",
863
+ "cmn-CN-Wavenet-B",
864
+ "cmn-CN-Wavenet-C",
865
+ "cmn-CN-Wavenet-D",
866
+ "nb-no-Wavenet-E",
867
+ "nb-no-Wavenet-A",
868
+ "nb-no-Wavenet-B",
869
+ "nb-no-Wavenet-C",
870
+ "nb-no-Wavenet-D",
871
+ "vi-VN-Wavenet-A",
872
+ "vi-VN-Wavenet-B",
873
+ "vi-VN-Wavenet-C",
874
+ "vi-VN-Wavenet-D",
875
+ "sr-rs-Standard-A",
876
+ "lv-lv-Standard-A",
877
+ "is-is-Standard-A",
878
+ "bg-bg-Standard-A",
879
+ "af-ZA-Standard-A",
880
+ "Tracy",
881
+ "Danny",
882
+ "Huihui",
883
+ "Yaoyao",
884
+ "Kangkang",
885
+ "HanHan",
886
+ "Zhiwei",
887
+ "Asaf",
888
+ "An",
889
+ "Stefanos",
890
+ "Filip",
891
+ "Ivan",
892
+ "Heidi",
893
+ "Herena",
894
+ "Kalpana",
895
+ "Hemant",
896
+ "Matej",
897
+ "Andika",
898
+ "Rizwan",
899
+ "Lado",
900
+ "Valluvar",
901
+ "Linda",
902
+ "Heather",
903
+ "Sean",
904
+ "Michael",
905
+ "Karsten",
906
+ "Guillaume",
907
+ "Pattara",
908
+ "Jakub",
909
+ "Szabolcs",
910
+ "Hoda",
911
+ "Naayf",
912
+ ]
913
+
914
+ @classmethod
915
+ def text_to_audio(
916
+ cls,
917
+ message: str,
918
+ voice: str = "Brian",
919
+ save_to: Union[Path, str] = None,
920
+ auto: bool = True,
921
+ ) -> Union[str, bytes]:
922
+ """
923
+ Text to speech using StreamElements API
924
+
925
+ Parameters:
926
+ message (str): The text to convert to speech
927
+ voice (str, optional): The voice to use for speech synthesis. Defaults to "Brian".
928
+ save_to (Union[Path, str], optional): Path to save the audio file. Defaults to None.
929
+ auto (bool, optional): Generate filename based on `message` and save to `cls.cache_dir`. Defaults to True.
930
+
931
+ Returns:
932
+ result (Union[str, bytes]): Path to saved contents or audio content.
933
+ """
934
+ assert (
935
+ voice in cls.all_voices
936
+ ), f"Voice '{voice}' not one of [{', '.join(cls.all_voices)}]"
937
+ # Base URL for provider API
938
+ url: str = (
939
+ f"https://api.streamelements.com/kappa/v2/speech?voice={voice}&text={{{message}}}"
940
+ )
941
+ resp = requests.get(url=url, headers=cls.headers, stream=True)
942
+ if not resp.ok:
943
+ raise Exception(
944
+ f"Failed to perform the operation - ({resp.status_code}, {resp.reason}) - {resp.text}"
945
+ )
946
+
947
+ def sanitize_filename(path):
948
+ trash = [
949
+ "\\",
950
+ "/",
951
+ ":",
952
+ "*",
953
+ "?",
954
+ '"',
955
+ "<",
956
+ "|",
957
+ ">",
958
+ ]
959
+ for val in trash:
960
+ path = path.replace(val, "")
961
+ return path.strip()
962
+
963
+ if auto:
964
+ filename: str = message + "..." if len(message) <= 40 else message[:40]
965
+ save_to = cls.cache_dir / sanitize_filename(filename)
966
+ save_to = save_to.as_posix()
967
+
968
+ # Ensure cache_dir exists
969
+ cls.cache_dir.mkdir(parents=True, exist_ok=True)
970
+
971
+ if save_to:
972
+ if not save_to.endswith("mp3"):
973
+ save_to += ".mp3"
974
+
975
+ with open(save_to, "wb") as fh:
976
+ for chunk in resp.iter_content(chunk_size=512):
977
+ fh.write(chunk)
978
+ else:
979
+ return resp.content
980
+ return save_to
981
+
982
+ @staticmethod
983
+ def play(path_to_audio_file: Union[Path, str]) -> NoReturn:
984
+ """Play audio (.mp3) using playsound.
985
+ """
986
+ if not Path(path_to_audio_file).is_file():
987
+ raise FileNotFoundError(f"File does not exist - '{path_to_audio_file}'")
988
+ playsound(path_to_audio_file)
webscout/__init__.py CHANGED
@@ -24,10 +24,10 @@ webai = [
24
24
  "blackboxai",
25
25
  "g4fauto",
26
26
  "perplexity",
27
- "sean",
28
27
  "groq",
29
28
  "reka",
30
- "cohere"
29
+ "cohere",
30
+ "yepchat",
31
31
  ]
32
32
 
33
33
  gpt4free_providers = [
@@ -0,0 +1,33 @@
1
+ from webscout.AI import AsyncPhindSearch
2
+ from webscout.AI import AsyncYEPCHAT
3
+ from webscout.AI import AsyncOPENGPT
4
+ from webscout.AI import AsyncOPENAI
5
+ from webscout.AI import AsyncLLAMA2
6
+ from webscout.AI import AsyncLEO
7
+ from webscout.AI import AsyncKOBOLDAI
8
+ from webscout.AI import AsyncGROQ
9
+ from webscout.AI import AsyncBLACKBOXAI
10
+ from webscout.AI import AsyncGPT4FREE
11
+
12
+ mapper: dict[str, object] = {
13
+ "phind": AsyncPhindSearch,
14
+ "opengpt": AsyncOPENGPT,
15
+ "koboldai": AsyncKOBOLDAI,
16
+ "blackboxai": AsyncBLACKBOXAI,
17
+ "gpt4free": AsyncGPT4FREE,
18
+ "llama2": AsyncLLAMA2,
19
+ "yepchat": AsyncYEPCHAT,
20
+ "leo": AsyncLEO,
21
+ "groq": AsyncGROQ,
22
+ "openai": AsyncOPENAI,
23
+ }
24
+
25
+ tgpt_mapper: dict[str, object] = {
26
+ "phind": AsyncPhindSearch,
27
+ "opengpt": AsyncOPENGPT,
28
+ "koboldai": AsyncKOBOLDAI,
29
+ # "gpt4free": AsyncGPT4FREE,
30
+ "blackboxai": AsyncBLACKBOXAI,
31
+ "llama2": AsyncLLAMA2,
32
+ "yepchat": AsyncYEPCHAT,
33
+ }
webscout/exceptions.py CHANGED
@@ -7,4 +7,7 @@ class RatelimitE(Exception):
7
7
 
8
8
 
9
9
  class TimeoutE(Exception):
10
- """Raised for timeout errors during API requests."""
10
+ """Raised for timeout errors during API requests."""
11
+
12
+ class FailedToGenerateResponseError(Exception):
13
+ """Provider failed to fetch response"""