webscout-1.2.9-py3-none-any.whl → webscout-1.3.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

webscout/LLM.py CHANGED
@@ -8,7 +8,7 @@ class LLM:
         self.model = model
         self.conversation_history = [{"role": "system", "content": system_message}]
 
-    def mistral_chat(self, messages: List[Dict[str, str]]) -> Union[str, None]:
+    def chat(self, messages: List[Dict[str, str]]) -> Union[str, None]:
         url = "https://api.deepinfra.com/v1/openai/chat/completions"
         headers = {
             'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
@@ -43,26 +43,3 @@ class LLM:
             return result.json()['choices'][0]['message']['content']
         except:
             return None
-
-    def chat(self):
-        while True:
-            prompt = input("👦: ")
-            user_message = {"role": "user", "content": prompt}
-            self.conversation_history.append(user_message)
-            try:
-                resp = self.mistral_chat(self.conversation_history)
-                print(f"🤖: {resp}")
-                self.conversation_history.append({"role": "assistant", "content": resp})
-            except Exception as e:
-                print(f"🤖: Oops, something went wrong: {e}! Looks like even AI needs some oiling sometimes.")
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description='LLM CLI', epilog='To use a specific model, run:\n'
-                                     'python -m webscout.LLM model_name\n'
-                                     'Replace "model_name" with the name of the model you wish to use It supports ALL text generation models on deepinfra.com.')
-    parser.add_argument('model', type=str, help='Model to use for text generation. Specify the full model name, e.g., "mistralai/Mistral-7B-Instruct-v0.1".')
-    parser.add_argument('--system-message', type=str, default="You are a Helpful AI.", help='Custom system prompt for the AI.')
-    args = parser.parse_args()
-
-    LLM = LLM(args.model, args.system_message)
-    LLM.chat()
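
With this release the interactive CLI loop and the `argparse` entry point are removed, and the former `mistral_chat` method becomes the public `chat` method, so callers build the OpenAI-style message list themselves. A minimal sketch of the resulting usage (the model name below is illustrative):

```python
from webscout.LLM import LLM

# Illustrative model id; per the package docs, any deepinfra.com text-generation model should work.
llm = LLM(model="mistralai/Mistral-7B-Instruct-v0.1", system_message="You are a Helpful AI.")

# chat() takes an OpenAI-style message list and returns the reply text, or None on failure.
reply = llm.chat([{"role": "user", "content": "Say hello in one sentence."}])
print(reply)
```
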
webscout/__init__.py CHANGED
@@ -10,6 +10,8 @@ from .webscout_search_async import AsyncWEBS
 from .version import __version__
 from .DWEBS import DeepWEBS
 from .transcriber import transcriber
+from .voice import play_audio
+from .LLM import LLM
 
 
 __all__ = ["WEBS", "AsyncWEBS", "__version__", "cli"]
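
Both helpers are now re-exported from the package root (`__all__` is unchanged, so they are plain module attributes rather than part of the star-import surface). A one-line sketch of the resulting import path:

```python
# New in 1.3.1: both names resolve via webscout/__init__.py.
from webscout import LLM, play_audio
```
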
webscout/version.py CHANGED
@@ -1,2 +1,2 @@
-__version__ = "1.2.9"
+__version__ = "1.3.1"
 
webscout/voice.py ADDED
@@ -0,0 +1,27 @@
+import requests
+import typing
+
+def play_audio(message: str, voice: str = "Brian") -> typing.Union[str, typing.NoReturn]:
+    """
+    Text to speech using StreamElements API
+
+    Parameters:
+        message (str): The text to convert to speech
+        voice (str): The voice to use for speech synthesis. Default is "Brian".
+
+    Returns:
+        result (Union[str, None]): Temporary file path or None in failure
+    """
+    # Base URL for provider API
+    url: str = f"https://api.streamelements.com/kappa/v2/speech?voice={voice}&text={{{message}}}"
+
+    # Request headers
+    headers: typing.Dict[str, str] = {
+        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36'
+    }
+    # Try to send request or return None on failure
+    try:
+        result = requests.get(url=url, headers=headers)
+        return result.content
+    except:
+        return None
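
Despite its docstring, the function returns the raw response body (`result.content`, audio bytes) rather than a temporary file path, and the message is interpolated into the query string without URL encoding. A minimal usage sketch under that reading, saving the bytes to an MP3 file as the project README does:

```python
from webscout.voice import play_audio

audio = play_audio("Hello from webscout!", voice="Brian")
if audio is not None:
    # On success this is raw audio data; on a request failure the function returns None.
    with open("hello.mp3", "wb") as f:
        f.write(audio)
```
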
webscout-1.2.9.dist-info/METADATA → webscout-1.3.1.dist-info/METADATA CHANGED
@@ -1,7 +1,7 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 1.2.9
-Summary: Search for words, documents, images, videos, news, maps and text translation using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models and now can transcribe yt videos
+Version: 1.3.1
+Summary: Search for words, documents, images, videos, news, maps and text translation using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models, can transcribe yt videos and have TTS support
 Author: OEvortex
 Author-email: helpingai5@gmail.com
 License: HelpingAI Simplified Universal License
@@ -72,6 +72,9 @@ Search for words, documents, images, videos, news, maps and text translation usi
 - [Activating DeepWEBS](#activating-deepwebs)
 - [Point to remember before using `DeepWEBS`](#point-to-remember-before-using-deepwebs)
 - [Usage Example](#usage-example)
+- [Text-to-Speech:](#text-to-speech)
+- [Available TTS Voices:](#available-tts-voices)
+- [ALL voices:](#all-voices)
 - [WEBS and AsyncWEBS classes](#webs-and-asyncwebs-classes)
 - [Exceptions](#exceptions)
 - [usage of webscout](#usage-of-webscout)
@@ -96,6 +99,7 @@ Search for words, documents, images, videos, news, maps and text translation usi
 - [9. `KOBOLDIA` -](#9-koboldia--)
 - [usage of special .LLM file from webscout (webscout.LLM)](#usage-of-special-llm-file-from-webscout-webscoutllm)
 - [`LLM`](#llm)
+- [`LLM` with internet](#llm-with-internet)
 
 ## Install
 ```python
@@ -337,6 +341,23 @@ if __name__ == "__main__":
     main()
 
 ```
+## Text-to-Speech:
+```python
+from webscout import play_audio
+
+message = "This is an example of text-to-speech."
+audio_content = play_audio(message, voice="Brian")
+
+# Save the audio to a file
+with open("output.mp3", "wb") as f:
+    f.write(audio_content)
+```
+### Available TTS Voices:
+You can choose from a wide range of voices, including:
+- Filiz, Astrid, Tatyana, Maxim, Carmen, Ines, Cristiano, Vitoria, Ricardo, Maja, Jan, Jacek, Ewa, Ruben, Lotte, Liv, Seoyeon, Takumi, Mizuki, Giorgio, Carla, Bianca, Karl, Dora, Mathieu, Celine, Chantal, Penelope, Miguel, Mia, Enrique, Conchita, Geraint, Salli, Matthew, Kimberly, Kendra, Justin, Joey, Joanna, Ivy, Raveena, Aditi, Emma, Brian, Amy, Russell, Nicole, Vicki, Marlene, Hans, Naja, Mads, Gwyneth, Zhiyu
+- Standard and WaveNet voices for various languages (e.g., en-US, es-ES, ja-JP, etc.)
+### ALL voices:
+[Filiz, Astrid, Tatyana, Maxim, Carmen, Ines, Cristiano, Vitoria, Ricardo, Maja, Jan, Jacek, Ewa, Ruben, Lotte, Liv, Seoyeon, Takumi, Mizuki, Giorgio, Carla, Bianca, Karl, Dora, Mathieu, Celine, Chantal, Penelope, Miguel, Mia, Enrique, Conchita, Geraint, Salli, Matthew, Kimberly, Kendra, Justin, Joey, Joanna, Ivy, Raveena, Aditi, Emma, Brian, Amy, Russell, Nicole, Vicki, Marlene, Hans, Naja, Mads, Gwyneth, Zhiyu, es-ES-Standard-A, it-IT-Standard-A, it-IT-Wavenet-A, ja-JP-Standard-A, ja-JP-Wavenet-A, ko-KR-Standard-A, ko-KR-Wavenet-A, pt-BR-Standard-A, tr-TR-Standard-A, sv-SE-Standard-A, nl-NL-Standard-A, nl-NL-Wavenet-A, en-US-Wavenet-A, en-US-Wavenet-B, en-US-Wavenet-C, en-US-Wavenet-D, en-US-Wavenet-E, en-US-Wavenet-F, en-GB-Standard-A, en-GB-Standard-B, en-GB-Standard-C, en-GB-Standard-D, en-GB-Wavenet-A, en-GB-Wavenet-B, en-GB-Wavenet-C, en-GB-Wavenet-D, en-US-Standard-B, en-US-Standard-C, en-US-Standard-D, en-US-Standard-E, de-DE-Standard-A, de-DE-Standard-B, de-DE-Wavenet-A, de-DE-Wavenet-B, de-DE-Wavenet-C, de-DE-Wavenet-D, en-AU-Standard-A, en-AU-Standard-B, en-AU-Wavenet-A, en-AU-Wavenet-B, en-AU-Wavenet-C, en-AU-Wavenet-D, en-AU-Standard-C, en-AU-Standard-D, fr-CA-Standard-A, fr-CA-Standard-B, fr-CA-Standard-C, fr-CA-Standard-D, fr-FR-Standard-C, fr-FR-Standard-D, fr-FR-Wavenet-A, fr-FR-Wavenet-B, fr-FR-Wavenet-C, fr-FR-Wavenet-D, da-DK-Wavenet-A, pl-PL-Wavenet-A, pl-PL-Wavenet-B, pl-PL-Wavenet-C, pl-PL-Wavenet-D, pt-PT-Wavenet-A, pt-PT-Wavenet-B, pt-PT-Wavenet-C, pt-PT-Wavenet-D, ru-RU-Wavenet-A, ru-RU-Wavenet-B, ru-RU-Wavenet-C, ru-RU-Wavenet-D, sk-SK-Wavenet-A, tr-TR-Wavenet-A, tr-TR-Wavenet-B, tr-TR-Wavenet-C, tr-TR-Wavenet-D, tr-TR-Wavenet-E, uk-UA-Wavenet-A, ar-XA-Wavenet-A, ar-XA-Wavenet-B, ar-XA-Wavenet-C, cs-CZ-Wavenet-A, nl-NL-Wavenet-B, nl-NL-Wavenet-C, nl-NL-Wavenet-D, nl-NL-Wavenet-E, en-IN-Wavenet-A, en-IN-Wavenet-B, en-IN-Wavenet-C, fil-PH-Wavenet-A, fi-FI-Wavenet-A, el-GR-Wavenet-A, hi-IN-Wavenet-A, hi-IN-Wavenet-B, hi-IN-Wavenet-C, hu-HU-Wavenet-A, id-ID-Wavenet-A, id-ID-Wavenet-B, id-ID-Wavenet-C, it-IT-Wavenet-B, it-IT-Wavenet-C, it-IT-Wavenet-D, ja-JP-Wavenet-B, ja-JP-Wavenet-C, ja-JP-Wavenet-D, cmn-CN-Wavenet-A, cmn-CN-Wavenet-B, cmn-CN-Wavenet-C, cmn-CN-Wavenet-D, nb-no-Wavenet-E, nb-no-Wavenet-A, nb-no-Wavenet-B, nb-no-Wavenet-C, nb-no-Wavenet-D, vi-VN-Wavenet-A, vi-VN-Wavenet-B, vi-VN-Wavenet-C, vi-VN-Wavenet-D, sr-rs-Standard-A, lv-lv-Standard-A, is-is-Standard-A, bg-bg-Standard-A, af-ZA-Standard-A, Tracy, Danny, Huihui, Yaoyao, Kangkang, HanHan, Zhiwei, Asaf, An, Stefanos, Filip, Ivan, Heidi, Herena, Kalpana, Hemant, Matej, Andika, Rizwan, Lado, Valluvar, Linda, Heather, Sean, Michael, Karsten, Guillaume, Pattara, Jakub, Szabolcs, Hoda, Naayf]
 ## WEBS and AsyncWEBS classes
 
 The WEBS and AsyncWEBS classes are used to retrieve search results from DuckDuckGo.com and yep.com periodically.
@@ -684,11 +705,102 @@ print(message)
 ```python
 from webscout.LLM import LLM
 
-def chat(model_name, system_message="You are Jarvis"):# system prompt
-    AI = LLM(model_name, system_message)
-    AI.chat()
+# Read the system message from the file
+with open('system.txt', 'r') as file:
+    system_message = file.read()
+
+# Initialize the LLM class with the model name and system message
+llm = LLM(model="microsoft/WizardLM-2-8x22B", system_message=system_message)
+
+while True:
+    # Get the user input
+    user_input = input("User: ")
+
+    # Define the messages to be sent
+    messages = [
+        {"role": "user", "content": user_input}
+    ]
+
+    # Use the mistral_chat method to get the response
+    response = llm.chat(messages)
+
+    # Print the response
+    print("AI: ", response)
+```
+### `LLM` with internet
+```python
+from __future__ import annotations
+from typing import List, Optional
+
+from webscout import LLM
+from webscout import WEBS
+import warnings
+
+system_message: str = (
+    "As AI, you possess internet access and are capable of executing real-time web searches based on user inputs. "
+    "You shall utilize this capability to enrich conversations, offer informed insights, and augment your ability to "
+    "respond accurately and thoroughly. However, refrain from stating 'You have provided a list of strings,' ensuring "
+    "seamless interactions with users. Embrace a responsive demeanor, harnessing available online resources to address "
+    "queries, share pertinent content, and facilitate meaningful exchanges. By doing so, you create value through "
+    "connection and engagement, ultimately enhancing overall user satisfaction and experience. Additionally, "
+    "continue upholding the principles of respect, impartiality, and intellectual integrity throughout all interactions."
+)
+
+# Ignore the specific UserWarning
+warnings.filterwarnings("ignore", category=UserWarning, module="curl_cffi.aio", lineno=205)
+LLM = LLM(model="meta-llama/Meta-Llama-3-70B-Instruct", system_message=system_message)
+
+
+def chat(
+    user_input: str, webs: WEBS, max_results: int = 10
+) -> Optional[str]:
+    """
+    Chat function to perform a web search based on the user input and generate a response using the LLM model.
+
+    Parameters
+    ----------
+    user_input : str
+        The user input to be used for the web search
+    webs : WEBS
+        The web search instance to be used to perform the search
+    max_results : int, optional
+        The maximum number of search results to include in the response, by default 10
+
+    Returns
+    -------
+    Optional[str]
+        The response generated by the LLM model, or None if there is no response
+    """
+    # Perform a web search based on the user input
+    search_results: List[str] = []
+    for r in webs.text(
+        user_input, region="wt-wt", safesearch="off", timelimit="y", max_results=max_results
+    ):
+        search_results.append(str(r))  # Convert each result to a string
+
+    # Define the messages to be sent, including the user input, search results, and system message
+    messages = [
+        {"role": "user", "content": user_input + "\n" + "websearch results are:" + "\n".join(search_results)},
+    ]
+
+    # Use the chat method to get the response
+    response = LLM.chat(messages)
+
+    return response
+
 
 if __name__ == "__main__":
-    model_name = "mistralai/Mistral-7B-Instruct-v0.2" # name of the model you wish to use It supports ALL text generation models on deepinfra.com.
-    chat(model_name)
+    while True:
+        # Get the user input
+        user_input = input("User: ")
+
+        # Perform a web search based on the user input
+        with WEBS() as webs:
+            response = chat(user_input, webs)
+
+        # Print the response
+        if response:
+            print("AI:", response)
+        else:
+            print("No response")
 ```
webscout-1.2.9.dist-info/RECORD → webscout-1.3.1.dist-info/RECORD CHANGED
@@ -15,20 +15,21 @@ webscout/AIbase.py,sha256=vQi2ougu5bG-QdmoYmxCQsOg7KTEgG7EF6nZh5qqUGw,2343
 webscout/AIutel.py,sha256=fNN4mmjXcxjJGq2CVJP1MU2oQ78p8OyExQBjVif6e-k,24123
 webscout/DWEBS.py,sha256=QT-7-dUgWhQ_H7EVZD53AVyXxyskoPMKCkFIpzkN56Q,7332
 webscout/HelpingAI.py,sha256=YeZw0zYVHMcBFFPNdd3_Ghpm9ebt_EScQjHO_IIs4lg,8103
-webscout/LLM.py,sha256=XByJPiATLA_57FBWKw18Xx_PGRCPOj-GJE96aQH1k2Y,3309
-webscout/__init__.py,sha256=lUA_Bkot1Uo6LfhER6lI1CODUkdGTMdSH3bZ_sg510Q,519
+webscout/LLM.py,sha256=CiDz0okZNEoXuxMwadZnwRGSLpqk2zg0vzvXSxQZjcE,1910
+webscout/__init__.py,sha256=rgmTILV0qx0x2PVdMq7flk5nas102sQN5z8p_OZaTzg,572
 webscout/__main__.py,sha256=ZtTRgsRjUi2JOvYFLF1ZCh55Sdoz94I-BS-TlJC7WDU,126
 webscout/cli.py,sha256=F888fdrFUQgczMBN4yMOSf6Nh-IbvkqpPhDsbnA2FtQ,17059
 webscout/exceptions.py,sha256=4AOO5wexeL96nvUS-badcckcwrPS7UpZyAgB9vknHZE,276
 webscout/models.py,sha256=5iQIdtedT18YuTZ3npoG7kLMwcrKwhQ7928dl_7qZW0,692
 webscout/transcriber.py,sha256=EddvTSq7dPJ42V3pQVnGuEiYQ7WjJ9uyeR9kMSxN7uY,20622
 webscout/utils.py,sha256=c_98M4oqpb54pUun3fpGGlCerFD6ZHUbghyp5b7Mwgo,2605
-webscout/version.py,sha256=T6eydtWX8etrNY1HI44jOpHvKUa_ijIFKoDz4vHehGM,25
+webscout/version.py,sha256=xBny7vFjm_5sB7eDRX5tmYecHaNfJlEkwuSeLudsFXo,25
+webscout/voice.py,sha256=1Ids_2ToPBMX0cH_UyPMkY_6eSE9H4Gazrl0ujPmFag,941
 webscout/webscout_search.py,sha256=3_lli-hDb8_kCGwscK29xuUcOS833ROgpNhDzrxh0dk,3085
 webscout/webscout_search_async.py,sha256=Y5frH0k3hLqBCR-8dn7a_b7EvxdYxn6wHiKl3jWosE0,40670
-webscout-1.2.9.dist-info/LICENSE.md,sha256=mRVwJuT4SXC5O93BFdsfWBjlXjGn2Np90Zm5SocUzM0,3150
-webscout-1.2.9.dist-info/METADATA,sha256=io3mb7v5QOdaMg_VjjpQy4P6vvuTnun8T3MJNuxfLgM,24527
-webscout-1.2.9.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
-webscout-1.2.9.dist-info/entry_points.txt,sha256=8-93eRslYrzTHs5E-6yFRJrve00C9q-SkXJD113jzRY,197
-webscout-1.2.9.dist-info/top_level.txt,sha256=OD5YKy6Y3hldL7SmuxsiEDxAG4LgdSSWwzYk22MF9fk,18
-webscout-1.2.9.dist-info/RECORD,,
+webscout-1.3.1.dist-info/LICENSE.md,sha256=mRVwJuT4SXC5O93BFdsfWBjlXjGn2Np90Zm5SocUzM0,3150
+webscout-1.3.1.dist-info/METADATA,sha256=Lqt3sYRj9TpssiZ_mD3z0Sh_UrdVPPm5L9o0hPAIFWw,31547
+webscout-1.3.1.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+webscout-1.3.1.dist-info/entry_points.txt,sha256=8-93eRslYrzTHs5E-6yFRJrve00C9q-SkXJD113jzRY,197
+webscout-1.3.1.dist-info/top_level.txt,sha256=OD5YKy6Y3hldL7SmuxsiEDxAG4LgdSSWwzYk22MF9fk,18
+webscout-1.3.1.dist-info/RECORD,,
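
Each RECORD entry has the form `path,sha256=<digest>,size`, where the digest is the unpadded URL-safe base64 encoding of the file's SHA-256 hash. A small sketch (a hypothetical helper, not part of webscout) for reproducing the digest listed for any file in the extracted wheel:

```python
import base64
import hashlib
from pathlib import Path

def record_digest(path: str) -> str:
    """Return the sha256=<...> value a wheel RECORD would list for this file."""
    raw = hashlib.sha256(Path(path).read_bytes()).digest()
    return "sha256=" + base64.urlsafe_b64encode(raw).rstrip(b"=").decode("ascii")

# Example: compare against the RECORD line for webscout/voice.py in the extracted wheel.
print(record_digest("webscout/voice.py"))
```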