webscout 1.2.9.tar.gz → 1.3.1.tar.gz

This diff shows the changes between package versions as they were published to their public registries and is provided for informational purposes only.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (41)
  1. {webscout-1.2.9 → webscout-1.3.1}/PKG-INFO +119 -7
  2. {webscout-1.2.9 → webscout-1.3.1}/README.md +118 -6
  3. {webscout-1.2.9 → webscout-1.3.1}/setup.py +2 -6
  4. {webscout-1.2.9 → webscout-1.3.1}/webscout/LLM.py +1 -24
  5. {webscout-1.2.9 → webscout-1.3.1}/webscout/__init__.py +2 -0
  6. webscout-1.3.1/webscout/version.py +2 -0
  7. webscout-1.3.1/webscout/voice.py +27 -0
  8. {webscout-1.2.9 → webscout-1.3.1}/webscout.egg-info/PKG-INFO +119 -7
  9. {webscout-1.2.9 → webscout-1.3.1}/webscout.egg-info/SOURCES.txt +1 -0
  10. webscout-1.2.9/webscout/version.py +0 -2
  11. {webscout-1.2.9 → webscout-1.3.1}/DeepWEBS/__init__.py +0 -0
  12. {webscout-1.2.9 → webscout-1.3.1}/DeepWEBS/documents/__init__.py +0 -0
  13. {webscout-1.2.9 → webscout-1.3.1}/DeepWEBS/documents/query_results_extractor.py +0 -0
  14. {webscout-1.2.9 → webscout-1.3.1}/DeepWEBS/documents/webpage_content_extractor.py +0 -0
  15. {webscout-1.2.9 → webscout-1.3.1}/DeepWEBS/networks/__init__.py +0 -0
  16. {webscout-1.2.9 → webscout-1.3.1}/DeepWEBS/networks/filepath_converter.py +0 -0
  17. {webscout-1.2.9 → webscout-1.3.1}/DeepWEBS/networks/google_searcher.py +0 -0
  18. {webscout-1.2.9 → webscout-1.3.1}/DeepWEBS/networks/network_configs.py +0 -0
  19. {webscout-1.2.9 → webscout-1.3.1}/DeepWEBS/networks/webpage_fetcher.py +0 -0
  20. {webscout-1.2.9 → webscout-1.3.1}/DeepWEBS/utilsdw/__init__.py +0 -0
  21. {webscout-1.2.9 → webscout-1.3.1}/DeepWEBS/utilsdw/enver.py +0 -0
  22. {webscout-1.2.9 → webscout-1.3.1}/DeepWEBS/utilsdw/logger.py +0 -0
  23. {webscout-1.2.9 → webscout-1.3.1}/LICENSE.md +0 -0
  24. {webscout-1.2.9 → webscout-1.3.1}/setup.cfg +0 -0
  25. {webscout-1.2.9 → webscout-1.3.1}/webscout/AI.py +0 -0
  26. {webscout-1.2.9 → webscout-1.3.1}/webscout/AIbase.py +0 -0
  27. {webscout-1.2.9 → webscout-1.3.1}/webscout/AIutel.py +0 -0
  28. {webscout-1.2.9 → webscout-1.3.1}/webscout/DWEBS.py +0 -0
  29. {webscout-1.2.9 → webscout-1.3.1}/webscout/HelpingAI.py +0 -0
  30. {webscout-1.2.9 → webscout-1.3.1}/webscout/__main__.py +0 -0
  31. {webscout-1.2.9 → webscout-1.3.1}/webscout/cli.py +0 -0
  32. {webscout-1.2.9 → webscout-1.3.1}/webscout/exceptions.py +0 -0
  33. {webscout-1.2.9 → webscout-1.3.1}/webscout/models.py +0 -0
  34. {webscout-1.2.9 → webscout-1.3.1}/webscout/transcriber.py +0 -0
  35. {webscout-1.2.9 → webscout-1.3.1}/webscout/utils.py +0 -0
  36. {webscout-1.2.9 → webscout-1.3.1}/webscout/webscout_search.py +0 -0
  37. {webscout-1.2.9 → webscout-1.3.1}/webscout/webscout_search_async.py +0 -0
  38. {webscout-1.2.9 → webscout-1.3.1}/webscout.egg-info/dependency_links.txt +0 -0
  39. {webscout-1.2.9 → webscout-1.3.1}/webscout.egg-info/entry_points.txt +0 -0
  40. {webscout-1.2.9 → webscout-1.3.1}/webscout.egg-info/requires.txt +0 -0
  41. {webscout-1.2.9 → webscout-1.3.1}/webscout.egg-info/top_level.txt +0 -0
{webscout-1.2.9 → webscout-1.3.1}/PKG-INFO
@@ -1,7 +1,7 @@
  Metadata-Version: 2.1
  Name: webscout
- Version: 1.2.9
- Summary: Search for words, documents, images, videos, news, maps and text translation using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models and now can transcribe yt videos
+ Version: 1.3.1
+ Summary: Search for words, documents, images, videos, news, maps and text translation using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models, can transcribe yt videos and have TTS support
  Author: OEvortex
  Author-email: helpingai5@gmail.com
  License: HelpingAI Simplified Universal License
@@ -72,6 +72,9 @@ Search for words, documents, images, videos, news, maps and text translation usi
  - [Activating DeepWEBS](#activating-deepwebs)
  - [Point to remember before using `DeepWEBS`](#point-to-remember-before-using-deepwebs)
  - [Usage Example](#usage-example)
+ - [Text-to-Speech:](#text-to-speech)
+ - [Available TTS Voices:](#available-tts-voices)
+ - [ALL voices:](#all-voices)
  - [WEBS and AsyncWEBS classes](#webs-and-asyncwebs-classes)
  - [Exceptions](#exceptions)
  - [usage of webscout](#usage-of-webscout)
@@ -96,6 +99,7 @@ Search for words, documents, images, videos, news, maps and text translation usi
  - [9. `KOBOLDIA` -](#9-koboldia--)
  - [usage of special .LLM file from webscout (webscout.LLM)](#usage-of-special-llm-file-from-webscout-webscoutllm)
  - [`LLM`](#llm)
+ - [`LLM` with internet](#llm-with-internet)

  ## Install
  ```python
@@ -337,6 +341,23 @@ if __name__ == "__main__":
      main()

  ```
+ ## Text-to-Speech:
+ ```python
+ from webscout import play_audio
+
+ message = "This is an example of text-to-speech."
+ audio_content = play_audio(message, voice="Brian")
+
+ # Save the audio to a file
+ with open("output.mp3", "wb") as f:
+     f.write(audio_content)
+ ```
+ ### Available TTS Voices:
+ You can choose from a wide range of voices, including:
+ - Filiz, Astrid, Tatyana, Maxim, Carmen, Ines, Cristiano, Vitoria, Ricardo, Maja, Jan, Jacek, Ewa, Ruben, Lotte, Liv, Seoyeon, Takumi, Mizuki, Giorgio, Carla, Bianca, Karl, Dora, Mathieu, Celine, Chantal, Penelope, Miguel, Mia, Enrique, Conchita, Geraint, Salli, Matthew, Kimberly, Kendra, Justin, Joey, Joanna, Ivy, Raveena, Aditi, Emma, Brian, Amy, Russell, Nicole, Vicki, Marlene, Hans, Naja, Mads, Gwyneth, Zhiyu
+ - Standard and WaveNet voices for various languages (e.g., en-US, es-ES, ja-JP, etc.)
+ ### ALL voices:
+ [Filiz, Astrid, Tatyana, Maxim, Carmen, Ines, Cristiano, Vitoria, Ricardo, Maja, Jan, Jacek, Ewa, Ruben, Lotte, Liv, Seoyeon, Takumi, Mizuki, Giorgio, Carla, Bianca, Karl, Dora, Mathieu, Celine, Chantal, Penelope, Miguel, Mia, Enrique, Conchita, Geraint, Salli, Matthew, Kimberly, Kendra, Justin, Joey, Joanna, Ivy, Raveena, Aditi, Emma, Brian, Amy, Russell, Nicole, Vicki, Marlene, Hans, Naja, Mads, Gwyneth, Zhiyu, es-ES-Standard-A, it-IT-Standard-A, it-IT-Wavenet-A, ja-JP-Standard-A, ja-JP-Wavenet-A, ko-KR-Standard-A, ko-KR-Wavenet-A, pt-BR-Standard-A, tr-TR-Standard-A, sv-SE-Standard-A, nl-NL-Standard-A, nl-NL-Wavenet-A, en-US-Wavenet-A, en-US-Wavenet-B, en-US-Wavenet-C, en-US-Wavenet-D, en-US-Wavenet-E, en-US-Wavenet-F, en-GB-Standard-A, en-GB-Standard-B, en-GB-Standard-C, en-GB-Standard-D, en-GB-Wavenet-A, en-GB-Wavenet-B, en-GB-Wavenet-C, en-GB-Wavenet-D, en-US-Standard-B, en-US-Standard-C, en-US-Standard-D, en-US-Standard-E, de-DE-Standard-A, de-DE-Standard-B, de-DE-Wavenet-A, de-DE-Wavenet-B, de-DE-Wavenet-C, de-DE-Wavenet-D, en-AU-Standard-A, en-AU-Standard-B, en-AU-Wavenet-A, en-AU-Wavenet-B, en-AU-Wavenet-C, en-AU-Wavenet-D, en-AU-Standard-C, en-AU-Standard-D, fr-CA-Standard-A, fr-CA-Standard-B, fr-CA-Standard-C, fr-CA-Standard-D, fr-FR-Standard-C, fr-FR-Standard-D, fr-FR-Wavenet-A, fr-FR-Wavenet-B, fr-FR-Wavenet-C, fr-FR-Wavenet-D, da-DK-Wavenet-A, pl-PL-Wavenet-A, pl-PL-Wavenet-B, pl-PL-Wavenet-C, pl-PL-Wavenet-D, pt-PT-Wavenet-A, pt-PT-Wavenet-B, pt-PT-Wavenet-C, pt-PT-Wavenet-D, ru-RU-Wavenet-A, ru-RU-Wavenet-B, ru-RU-Wavenet-C, ru-RU-Wavenet-D, sk-SK-Wavenet-A, tr-TR-Wavenet-A, tr-TR-Wavenet-B, tr-TR-Wavenet-C, tr-TR-Wavenet-D, tr-TR-Wavenet-E, uk-UA-Wavenet-A, ar-XA-Wavenet-A, ar-XA-Wavenet-B, ar-XA-Wavenet-C, cs-CZ-Wavenet-A, nl-NL-Wavenet-B, nl-NL-Wavenet-C, nl-NL-Wavenet-D, nl-NL-Wavenet-E, en-IN-Wavenet-A, en-IN-Wavenet-B, en-IN-Wavenet-C, fil-PH-Wavenet-A, fi-FI-Wavenet-A, el-GR-Wavenet-A, hi-IN-Wavenet-A, hi-IN-Wavenet-B, hi-IN-Wavenet-C, hu-HU-Wavenet-A, id-ID-Wavenet-A, id-ID-Wavenet-B, id-ID-Wavenet-C, it-IT-Wavenet-B, it-IT-Wavenet-C, it-IT-Wavenet-D, ja-JP-Wavenet-B, ja-JP-Wavenet-C, ja-JP-Wavenet-D, cmn-CN-Wavenet-A, cmn-CN-Wavenet-B, cmn-CN-Wavenet-C, cmn-CN-Wavenet-D, nb-no-Wavenet-E, nb-no-Wavenet-A, nb-no-Wavenet-B, nb-no-Wavenet-C, nb-no-Wavenet-D, vi-VN-Wavenet-A, vi-VN-Wavenet-B, vi-VN-Wavenet-C, vi-VN-Wavenet-D, sr-rs-Standard-A, lv-lv-Standard-A, is-is-Standard-A, bg-bg-Standard-A, af-ZA-Standard-A, Tracy, Danny, Huihui, Yaoyao, Kangkang, HanHan, Zhiwei, Asaf, An, Stefanos, Filip, Ivan, Heidi, Herena, Kalpana, Hemant, Matej, Andika, Rizwan, Lado, Valluvar, Linda, Heather, Sean, Michael, Karsten, Guillaume, Pattara, Jakub, Szabolcs, Hoda, Naayf]
  ## WEBS and AsyncWEBS classes

  The WEBS and AsyncWEBS classes are used to retrieve search results from DuckDuckGo.com and yep.com periodically.
@@ -684,11 +705,102 @@ print(message)
  ```python
  from webscout.LLM import LLM

- def chat(model_name, system_message="You are Jarvis"):# system prompt
-     AI = LLM(model_name, system_message)
-     AI.chat()
+ # Read the system message from the file
+ with open('system.txt', 'r') as file:
+     system_message = file.read()
+
+ # Initialize the LLM class with the model name and system message
+ llm = LLM(model="microsoft/WizardLM-2-8x22B", system_message=system_message)
+
+ while True:
+     # Get the user input
+     user_input = input("User: ")
+
+     # Define the messages to be sent
+     messages = [
+         {"role": "user", "content": user_input}
+     ]
+
+     # Use the mistral_chat method to get the response
+     response = llm.chat(messages)
+
+     # Print the response
+     print("AI: ", response)
+ ```
+ ### `LLM` with internet
+ ```python
+ from __future__ import annotations
+ from typing import List, Optional
+
+ from webscout import LLM
+ from webscout import WEBS
+ import warnings
+
+ system_message: str = (
+     "As AI, you possess internet access and are capable of executing real-time web searches based on user inputs. "
+     "You shall utilize this capability to enrich conversations, offer informed insights, and augment your ability to "
+     "respond accurately and thoroughly. However, refrain from stating 'You have provided a list of strings,' ensuring "
+     "seamless interactions with users. Embrace a responsive demeanor, harnessing available online resources to address "
+     "queries, share pertinent content, and facilitate meaningful exchanges. By doing so, you create value through "
+     "connection and engagement, ultimately enhancing overall user satisfaction and experience. Additionally, "
+     "continue upholding the principles of respect, impartiality, and intellectual integrity throughout all interactions."
+ )
+
+ # Ignore the specific UserWarning
+ warnings.filterwarnings("ignore", category=UserWarning, module="curl_cffi.aio", lineno=205)
+ LLM = LLM(model="meta-llama/Meta-Llama-3-70B-Instruct", system_message=system_message)
+
+
+ def chat(
+     user_input: str, webs: WEBS, max_results: int = 10
+ ) -> Optional[str]:
+     """
+     Chat function to perform a web search based on the user input and generate a response using the LLM model.
+
+     Parameters
+     ----------
+     user_input : str
+         The user input to be used for the web search
+     webs : WEBS
+         The web search instance to be used to perform the search
+     max_results : int, optional
+         The maximum number of search results to include in the response, by default 10
+
+     Returns
+     -------
+     Optional[str]
+         The response generated by the LLM model, or None if there is no response
+     """
+     # Perform a web search based on the user input
+     search_results: List[str] = []
+     for r in webs.text(
+         user_input, region="wt-wt", safesearch="off", timelimit="y", max_results=max_results
+     ):
+         search_results.append(str(r)) # Convert each result to a string
+
+     # Define the messages to be sent, including the user input, search results, and system message
+     messages = [
+         {"role": "user", "content": user_input + "\n" + "websearch results are:" + "\n".join(search_results)},
+     ]
+
+     # Use the chat method to get the response
+     response = LLM.chat(messages)
+
+     return response
+

  if __name__ == "__main__":
-     model_name = "mistralai/Mistral-7B-Instruct-v0.2" # name of the model you wish to use It supports ALL text generation models on deepinfra.com.
-     chat(model_name)
+     while True:
+         # Get the user input
+         user_input = input("User: ")
+
+         # Perform a web search based on the user input
+         with WEBS() as webs:
+             response = chat(user_input, webs)
+
+         # Print the response
+         if response:
+             print("AI:", response)
+         else:
+             print("No response")
  ```

{webscout-1.2.9 → webscout-1.3.1}/README.md
@@ -20,6 +20,9 @@ Search for words, documents, images, videos, news, maps and text translation usi
  - [Activating DeepWEBS](#activating-deepwebs)
  - [Point to remember before using `DeepWEBS`](#point-to-remember-before-using-deepwebs)
  - [Usage Example](#usage-example)
+ - [Text-to-Speech:](#text-to-speech)
+ - [Available TTS Voices:](#available-tts-voices)
+ - [ALL voices:](#all-voices)
  - [WEBS and AsyncWEBS classes](#webs-and-asyncwebs-classes)
  - [Exceptions](#exceptions)
  - [usage of webscout](#usage-of-webscout)
@@ -44,6 +47,7 @@ Search for words, documents, images, videos, news, maps and text translation usi
  - [9. `KOBOLDIA` -](#9-koboldia--)
  - [usage of special .LLM file from webscout (webscout.LLM)](#usage-of-special-llm-file-from-webscout-webscoutllm)
  - [`LLM`](#llm)
+ - [`LLM` with internet](#llm-with-internet)

  ## Install
  ```python
@@ -285,6 +289,23 @@ if __name__ == "__main__":
      main()

  ```
+ ## Text-to-Speech:
+ ```python
+ from webscout import play_audio
+
+ message = "This is an example of text-to-speech."
+ audio_content = play_audio(message, voice="Brian")
+
+ # Save the audio to a file
+ with open("output.mp3", "wb") as f:
+     f.write(audio_content)
+ ```
+ ### Available TTS Voices:
+ You can choose from a wide range of voices, including:
+ - Filiz, Astrid, Tatyana, Maxim, Carmen, Ines, Cristiano, Vitoria, Ricardo, Maja, Jan, Jacek, Ewa, Ruben, Lotte, Liv, Seoyeon, Takumi, Mizuki, Giorgio, Carla, Bianca, Karl, Dora, Mathieu, Celine, Chantal, Penelope, Miguel, Mia, Enrique, Conchita, Geraint, Salli, Matthew, Kimberly, Kendra, Justin, Joey, Joanna, Ivy, Raveena, Aditi, Emma, Brian, Amy, Russell, Nicole, Vicki, Marlene, Hans, Naja, Mads, Gwyneth, Zhiyu
+ - Standard and WaveNet voices for various languages (e.g., en-US, es-ES, ja-JP, etc.)
+ ### ALL voices:
+ [Filiz, Astrid, Tatyana, Maxim, Carmen, Ines, Cristiano, Vitoria, Ricardo, Maja, Jan, Jacek, Ewa, Ruben, Lotte, Liv, Seoyeon, Takumi, Mizuki, Giorgio, Carla, Bianca, Karl, Dora, Mathieu, Celine, Chantal, Penelope, Miguel, Mia, Enrique, Conchita, Geraint, Salli, Matthew, Kimberly, Kendra, Justin, Joey, Joanna, Ivy, Raveena, Aditi, Emma, Brian, Amy, Russell, Nicole, Vicki, Marlene, Hans, Naja, Mads, Gwyneth, Zhiyu, es-ES-Standard-A, it-IT-Standard-A, it-IT-Wavenet-A, ja-JP-Standard-A, ja-JP-Wavenet-A, ko-KR-Standard-A, ko-KR-Wavenet-A, pt-BR-Standard-A, tr-TR-Standard-A, sv-SE-Standard-A, nl-NL-Standard-A, nl-NL-Wavenet-A, en-US-Wavenet-A, en-US-Wavenet-B, en-US-Wavenet-C, en-US-Wavenet-D, en-US-Wavenet-E, en-US-Wavenet-F, en-GB-Standard-A, en-GB-Standard-B, en-GB-Standard-C, en-GB-Standard-D, en-GB-Wavenet-A, en-GB-Wavenet-B, en-GB-Wavenet-C, en-GB-Wavenet-D, en-US-Standard-B, en-US-Standard-C, en-US-Standard-D, en-US-Standard-E, de-DE-Standard-A, de-DE-Standard-B, de-DE-Wavenet-A, de-DE-Wavenet-B, de-DE-Wavenet-C, de-DE-Wavenet-D, en-AU-Standard-A, en-AU-Standard-B, en-AU-Wavenet-A, en-AU-Wavenet-B, en-AU-Wavenet-C, en-AU-Wavenet-D, en-AU-Standard-C, en-AU-Standard-D, fr-CA-Standard-A, fr-CA-Standard-B, fr-CA-Standard-C, fr-CA-Standard-D, fr-FR-Standard-C, fr-FR-Standard-D, fr-FR-Wavenet-A, fr-FR-Wavenet-B, fr-FR-Wavenet-C, fr-FR-Wavenet-D, da-DK-Wavenet-A, pl-PL-Wavenet-A, pl-PL-Wavenet-B, pl-PL-Wavenet-C, pl-PL-Wavenet-D, pt-PT-Wavenet-A, pt-PT-Wavenet-B, pt-PT-Wavenet-C, pt-PT-Wavenet-D, ru-RU-Wavenet-A, ru-RU-Wavenet-B, ru-RU-Wavenet-C, ru-RU-Wavenet-D, sk-SK-Wavenet-A, tr-TR-Wavenet-A, tr-TR-Wavenet-B, tr-TR-Wavenet-C, tr-TR-Wavenet-D, tr-TR-Wavenet-E, uk-UA-Wavenet-A, ar-XA-Wavenet-A, ar-XA-Wavenet-B, ar-XA-Wavenet-C, cs-CZ-Wavenet-A, nl-NL-Wavenet-B, nl-NL-Wavenet-C, nl-NL-Wavenet-D, nl-NL-Wavenet-E, en-IN-Wavenet-A, en-IN-Wavenet-B, en-IN-Wavenet-C, fil-PH-Wavenet-A, fi-FI-Wavenet-A, el-GR-Wavenet-A, hi-IN-Wavenet-A, hi-IN-Wavenet-B, hi-IN-Wavenet-C, hu-HU-Wavenet-A, id-ID-Wavenet-A, id-ID-Wavenet-B, id-ID-Wavenet-C, it-IT-Wavenet-B, it-IT-Wavenet-C, it-IT-Wavenet-D, ja-JP-Wavenet-B, ja-JP-Wavenet-C, ja-JP-Wavenet-D, cmn-CN-Wavenet-A, cmn-CN-Wavenet-B, cmn-CN-Wavenet-C, cmn-CN-Wavenet-D, nb-no-Wavenet-E, nb-no-Wavenet-A, nb-no-Wavenet-B, nb-no-Wavenet-C, nb-no-Wavenet-D, vi-VN-Wavenet-A, vi-VN-Wavenet-B, vi-VN-Wavenet-C, vi-VN-Wavenet-D, sr-rs-Standard-A, lv-lv-Standard-A, is-is-Standard-A, bg-bg-Standard-A, af-ZA-Standard-A, Tracy, Danny, Huihui, Yaoyao, Kangkang, HanHan, Zhiwei, Asaf, An, Stefanos, Filip, Ivan, Heidi, Herena, Kalpana, Hemant, Matej, Andika, Rizwan, Lado, Valluvar, Linda, Heather, Sean, Michael, Karsten, Guillaume, Pattara, Jakub, Szabolcs, Hoda, Naayf]
  ## WEBS and AsyncWEBS classes

  The WEBS and AsyncWEBS classes are used to retrieve search results from DuckDuckGo.com and yep.com periodically.
@@ -632,11 +653,102 @@ print(message)
  ```python
  from webscout.LLM import LLM

- def chat(model_name, system_message="You are Jarvis"):# system prompt
-     AI = LLM(model_name, system_message)
-     AI.chat()
+ # Read the system message from the file
+ with open('system.txt', 'r') as file:
+     system_message = file.read()

- if __name__ == "__main__":
-     model_name = "mistralai/Mistral-7B-Instruct-v0.2" # name of the model you wish to use It supports ALL text generation models on deepinfra.com.
-     chat(model_name)
+ # Initialize the LLM class with the model name and system message
+ llm = LLM(model="microsoft/WizardLM-2-8x22B", system_message=system_message)
+
+ while True:
+     # Get the user input
+     user_input = input("User: ")
+
+     # Define the messages to be sent
+     messages = [
+         {"role": "user", "content": user_input}
+     ]
+
+     # Use the mistral_chat method to get the response
+     response = llm.chat(messages)
+
+     # Print the response
+     print("AI: ", response)
  ```
+ ### `LLM` with internet
+ ```python
+ from __future__ import annotations
+ from typing import List, Optional
+
+ from webscout import LLM
+ from webscout import WEBS
+ import warnings
+
+ system_message: str = (
+     "As AI, you possess internet access and are capable of executing real-time web searches based on user inputs. "
+     "You shall utilize this capability to enrich conversations, offer informed insights, and augment your ability to "
+     "respond accurately and thoroughly. However, refrain from stating 'You have provided a list of strings,' ensuring "
+     "seamless interactions with users. Embrace a responsive demeanor, harnessing available online resources to address "
+     "queries, share pertinent content, and facilitate meaningful exchanges. By doing so, you create value through "
+     "connection and engagement, ultimately enhancing overall user satisfaction and experience. Additionally, "
+     "continue upholding the principles of respect, impartiality, and intellectual integrity throughout all interactions."
+ )
+
+ # Ignore the specific UserWarning
+ warnings.filterwarnings("ignore", category=UserWarning, module="curl_cffi.aio", lineno=205)
+ LLM = LLM(model="meta-llama/Meta-Llama-3-70B-Instruct", system_message=system_message)
+
+
+ def chat(
+     user_input: str, webs: WEBS, max_results: int = 10
+ ) -> Optional[str]:
+     """
+     Chat function to perform a web search based on the user input and generate a response using the LLM model.
+
+     Parameters
+     ----------
+     user_input : str
+         The user input to be used for the web search
+     webs : WEBS
+         The web search instance to be used to perform the search
+     max_results : int, optional
+         The maximum number of search results to include in the response, by default 10
+
+     Returns
+     -------
+     Optional[str]
+         The response generated by the LLM model, or None if there is no response
+     """
+     # Perform a web search based on the user input
+     search_results: List[str] = []
+     for r in webs.text(
+         user_input, region="wt-wt", safesearch="off", timelimit="y", max_results=max_results
+     ):
+         search_results.append(str(r)) # Convert each result to a string
+
+     # Define the messages to be sent, including the user input, search results, and system message
+     messages = [
+         {"role": "user", "content": user_input + "\n" + "websearch results are:" + "\n".join(search_results)},
+     ]
+
+     # Use the chat method to get the response
+     response = LLM.chat(messages)
+
+     return response
+
+
+ if __name__ == "__main__":
+     while True:
+         # Get the user input
+         user_input = input("User: ")
+
+         # Perform a web search based on the user input
+         with WEBS() as webs:
+             response = chat(user_input, webs)
+
+         # Print the response
+         if response:
+             print("AI:", response)
+         else:
+             print("No response")
+ ```

{webscout-1.2.9 → webscout-1.3.1}/setup.py
@@ -1,16 +1,12 @@
  from setuptools import setup, find_packages

- # version = None
- # with open("webscout/version.py") as version_file:
- # exec(version_file.read())
-
  with open("README.md", encoding="utf-8") as f:
      README = f.read()

  setup(
      name="webscout",
-     version="1.2.9",
-     description="Search for words, documents, images, videos, news, maps and text translation using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models and now can transcribe yt videos",
+     version="1.3.1",
+     description="Search for words, documents, images, videos, news, maps and text translation using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models, can transcribe yt videos and have TTS support",
      long_description=README,
      long_description_content_type="text/markdown",
      author="OEvortex",

{webscout-1.2.9 → webscout-1.3.1}/webscout/LLM.py
@@ -8,7 +8,7 @@ class LLM:
          self.model = model
          self.conversation_history = [{"role": "system", "content": system_message}]

-     def mistral_chat(self, messages: List[Dict[str, str]]) -> Union[str, None]:
+     def chat(self, messages: List[Dict[str, str]]) -> Union[str, None]:
          url = "https://api.deepinfra.com/v1/openai/chat/completions"
          headers = {
              'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
@@ -43,26 +43,3 @@ class LLM:
              return result.json()['choices'][0]['message']['content']
          except:
              return None
-
-     def chat(self):
-         while True:
-             prompt = input("👦: ")
-             user_message = {"role": "user", "content": prompt}
-             self.conversation_history.append(user_message)
-             try:
-                 resp = self.mistral_chat(self.conversation_history)
-                 print(f"🤖: {resp}")
-                 self.conversation_history.append({"role": "assistant", "content": resp})
-             except Exception as e:
-                 print(f"🤖: Oops, something went wrong: {e}! Looks like even AI needs some oiling sometimes.")
-
- if __name__ == "__main__":
-     parser = argparse.ArgumentParser(description='LLM CLI', epilog='To use a specific model, run:\n'
-                                      'python -m webscout.LLM model_name\n'
-                                      'Replace "model_name" with the name of the model you wish to use It supports ALL text generation models on deepinfra.com.')
-     parser.add_argument('model', type=str, help='Model to use for text generation. Specify the full model name, e.g., "mistralai/Mistral-7B-Instruct-v0.1".')
-     parser.add_argument('--system-message', type=str, default="You are a Helpful AI.", help='Custom system prompt for the AI.')
-     args = parser.parse_args()
-
-     LLM = LLM(args.model, args.system_message)
-     LLM.chat()
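
With `mistral_chat` renamed to `chat` and the interactive CLI loop removed, callers are now expected to build an OpenAI-style message list themselves and call `chat` directly, as the updated README examples above do. A minimal sketch of that calling pattern; the model id is only an example (the removed CLI help text states that any DeepInfra text-generation model name is accepted):

```python
from webscout.LLM import LLM

# Example model id; per the removed CLI help text, any DeepInfra
# text-generation model name should be accepted here.
llm = LLM(model="mistralai/Mistral-7B-Instruct-v0.2", system_message="You are a Helpful AI.")

# chat() posts an OpenAI-style message list and returns the reply text,
# or None if the request fails (see the bare except above).
reply = llm.chat([{"role": "user", "content": "Hello!"}])
print(reply)
```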

{webscout-1.2.9 → webscout-1.3.1}/webscout/__init__.py
@@ -10,6 +10,8 @@ from .webscout_search_async import AsyncWEBS
  from .version import __version__
  from .DWEBS import DeepWEBS
  from .transcriber import transcriber
+ from .voice import play_audio
+ from .LLM import LLM


  __all__ = ["WEBS", "AsyncWEBS", "__version__", "cli"]
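
These two new imports expose `play_audio` and `LLM` at the package root (note that `__all__` is left unchanged). A short sketch of the resulting import surface:

```python
# New in 1.3.1: both names resolve from the package root,
# in addition to their original submodules (webscout.LLM, webscout.voice).
from webscout import LLM, play_audio
```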

webscout-1.3.1/webscout/version.py
@@ -0,0 +1,2 @@
+ __version__ = "1.3.1"
+
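
Because `__init__.py` re-exports `__version__` from this new module, the installed version can be checked at runtime; a trivial sketch, assuming a 1.3.1 install:

```python
import webscout

# __version__ comes from webscout/version.py via the package __init__.
print(webscout.__version__)  # expected: "1.3.1"
```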

webscout-1.3.1/webscout/voice.py
@@ -0,0 +1,27 @@
+ import requests
+ import typing
+
+ def play_audio(message: str, voice: str = "Brian") -> typing.Union[str, typing.NoReturn]:
+     """
+     Text to speech using StreamElements API
+
+     Parameters:
+         message (str): The text to convert to speech
+         voice (str): The voice to use for speech synthesis. Default is "Brian".
+
+     Returns:
+         result (Union[str, None]): Temporary file path or None in failure
+     """
+     # Base URL for provider API
+     url: str = f"https://api.streamelements.com/kappa/v2/speech?voice={voice}&text={{{message}}}"
+
+     # Request headers
+     headers: typing.Dict[str, str] = {
+         'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36'
+     }
+     # Try to send request or return None on failure
+     try:
+         result = requests.get(url=url, headers=headers)
+         return result.content
+     except:
+         return None
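
Note that although the docstring mentions a temporary file path, the function actually returns `result.content` (the raw audio bytes) on success and `None` if the request raises, so callers should treat the return value as optional bytes. A hedged usage sketch, mirroring the README example added in this release:

```python
from webscout import play_audio

audio = play_audio("This is an example of text-to-speech.", voice="Brian")

# play_audio returns raw response bytes on success, or None if the
# HTTP request raised, so guard before writing the file.
if audio is not None:
    with open("output.mp3", "wb") as f:
        f.write(audio)
else:
    print("TTS request failed")
```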

{webscout-1.2.9 → webscout-1.3.1}/webscout.egg-info/PKG-INFO
@@ -1,7 +1,7 @@
  Metadata-Version: 2.1
  Name: webscout
- Version: 1.2.9
- Summary: Search for words, documents, images, videos, news, maps and text translation using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models and now can transcribe yt videos
+ Version: 1.3.1
+ Summary: Search for words, documents, images, videos, news, maps and text translation using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models, can transcribe yt videos and have TTS support
  Author: OEvortex
  Author-email: helpingai5@gmail.com
  License: HelpingAI Simplified Universal License
@@ -72,6 +72,9 @@ Search for words, documents, images, videos, news, maps and text translation usi
  - [Activating DeepWEBS](#activating-deepwebs)
  - [Point to remember before using `DeepWEBS`](#point-to-remember-before-using-deepwebs)
  - [Usage Example](#usage-example)
+ - [Text-to-Speech:](#text-to-speech)
+ - [Available TTS Voices:](#available-tts-voices)
+ - [ALL voices:](#all-voices)
  - [WEBS and AsyncWEBS classes](#webs-and-asyncwebs-classes)
  - [Exceptions](#exceptions)
  - [usage of webscout](#usage-of-webscout)
@@ -96,6 +99,7 @@ Search for words, documents, images, videos, news, maps and text translation usi
  - [9. `KOBOLDIA` -](#9-koboldia--)
  - [usage of special .LLM file from webscout (webscout.LLM)](#usage-of-special-llm-file-from-webscout-webscoutllm)
  - [`LLM`](#llm)
+ - [`LLM` with internet](#llm-with-internet)

  ## Install
  ```python
@@ -337,6 +341,23 @@ if __name__ == "__main__":
      main()

  ```
+ ## Text-to-Speech:
+ ```python
+ from webscout import play_audio
+
+ message = "This is an example of text-to-speech."
+ audio_content = play_audio(message, voice="Brian")
+
+ # Save the audio to a file
+ with open("output.mp3", "wb") as f:
+     f.write(audio_content)
+ ```
+ ### Available TTS Voices:
+ You can choose from a wide range of voices, including:
+ - Filiz, Astrid, Tatyana, Maxim, Carmen, Ines, Cristiano, Vitoria, Ricardo, Maja, Jan, Jacek, Ewa, Ruben, Lotte, Liv, Seoyeon, Takumi, Mizuki, Giorgio, Carla, Bianca, Karl, Dora, Mathieu, Celine, Chantal, Penelope, Miguel, Mia, Enrique, Conchita, Geraint, Salli, Matthew, Kimberly, Kendra, Justin, Joey, Joanna, Ivy, Raveena, Aditi, Emma, Brian, Amy, Russell, Nicole, Vicki, Marlene, Hans, Naja, Mads, Gwyneth, Zhiyu
+ - Standard and WaveNet voices for various languages (e.g., en-US, es-ES, ja-JP, etc.)
+ ### ALL voices:
+ [Filiz, Astrid, Tatyana, Maxim, Carmen, Ines, Cristiano, Vitoria, Ricardo, Maja, Jan, Jacek, Ewa, Ruben, Lotte, Liv, Seoyeon, Takumi, Mizuki, Giorgio, Carla, Bianca, Karl, Dora, Mathieu, Celine, Chantal, Penelope, Miguel, Mia, Enrique, Conchita, Geraint, Salli, Matthew, Kimberly, Kendra, Justin, Joey, Joanna, Ivy, Raveena, Aditi, Emma, Brian, Amy, Russell, Nicole, Vicki, Marlene, Hans, Naja, Mads, Gwyneth, Zhiyu, es-ES-Standard-A, it-IT-Standard-A, it-IT-Wavenet-A, ja-JP-Standard-A, ja-JP-Wavenet-A, ko-KR-Standard-A, ko-KR-Wavenet-A, pt-BR-Standard-A, tr-TR-Standard-A, sv-SE-Standard-A, nl-NL-Standard-A, nl-NL-Wavenet-A, en-US-Wavenet-A, en-US-Wavenet-B, en-US-Wavenet-C, en-US-Wavenet-D, en-US-Wavenet-E, en-US-Wavenet-F, en-GB-Standard-A, en-GB-Standard-B, en-GB-Standard-C, en-GB-Standard-D, en-GB-Wavenet-A, en-GB-Wavenet-B, en-GB-Wavenet-C, en-GB-Wavenet-D, en-US-Standard-B, en-US-Standard-C, en-US-Standard-D, en-US-Standard-E, de-DE-Standard-A, de-DE-Standard-B, de-DE-Wavenet-A, de-DE-Wavenet-B, de-DE-Wavenet-C, de-DE-Wavenet-D, en-AU-Standard-A, en-AU-Standard-B, en-AU-Wavenet-A, en-AU-Wavenet-B, en-AU-Wavenet-C, en-AU-Wavenet-D, en-AU-Standard-C, en-AU-Standard-D, fr-CA-Standard-A, fr-CA-Standard-B, fr-CA-Standard-C, fr-CA-Standard-D, fr-FR-Standard-C, fr-FR-Standard-D, fr-FR-Wavenet-A, fr-FR-Wavenet-B, fr-FR-Wavenet-C, fr-FR-Wavenet-D, da-DK-Wavenet-A, pl-PL-Wavenet-A, pl-PL-Wavenet-B, pl-PL-Wavenet-C, pl-PL-Wavenet-D, pt-PT-Wavenet-A, pt-PT-Wavenet-B, pt-PT-Wavenet-C, pt-PT-Wavenet-D, ru-RU-Wavenet-A, ru-RU-Wavenet-B, ru-RU-Wavenet-C, ru-RU-Wavenet-D, sk-SK-Wavenet-A, tr-TR-Wavenet-A, tr-TR-Wavenet-B, tr-TR-Wavenet-C, tr-TR-Wavenet-D, tr-TR-Wavenet-E, uk-UA-Wavenet-A, ar-XA-Wavenet-A, ar-XA-Wavenet-B, ar-XA-Wavenet-C, cs-CZ-Wavenet-A, nl-NL-Wavenet-B, nl-NL-Wavenet-C, nl-NL-Wavenet-D, nl-NL-Wavenet-E, en-IN-Wavenet-A, en-IN-Wavenet-B, en-IN-Wavenet-C, fil-PH-Wavenet-A, fi-FI-Wavenet-A, el-GR-Wavenet-A, hi-IN-Wavenet-A, hi-IN-Wavenet-B, hi-IN-Wavenet-C, hu-HU-Wavenet-A, id-ID-Wavenet-A, id-ID-Wavenet-B, id-ID-Wavenet-C, it-IT-Wavenet-B, it-IT-Wavenet-C, it-IT-Wavenet-D, ja-JP-Wavenet-B, ja-JP-Wavenet-C, ja-JP-Wavenet-D, cmn-CN-Wavenet-A, cmn-CN-Wavenet-B, cmn-CN-Wavenet-C, cmn-CN-Wavenet-D, nb-no-Wavenet-E, nb-no-Wavenet-A, nb-no-Wavenet-B, nb-no-Wavenet-C, nb-no-Wavenet-D, vi-VN-Wavenet-A, vi-VN-Wavenet-B, vi-VN-Wavenet-C, vi-VN-Wavenet-D, sr-rs-Standard-A, lv-lv-Standard-A, is-is-Standard-A, bg-bg-Standard-A, af-ZA-Standard-A, Tracy, Danny, Huihui, Yaoyao, Kangkang, HanHan, Zhiwei, Asaf, An, Stefanos, Filip, Ivan, Heidi, Herena, Kalpana, Hemant, Matej, Andika, Rizwan, Lado, Valluvar, Linda, Heather, Sean, Michael, Karsten, Guillaume, Pattara, Jakub, Szabolcs, Hoda, Naayf]
  ## WEBS and AsyncWEBS classes

  The WEBS and AsyncWEBS classes are used to retrieve search results from DuckDuckGo.com and yep.com periodically.
@@ -684,11 +705,102 @@ print(message)
  ```python
  from webscout.LLM import LLM

- def chat(model_name, system_message="You are Jarvis"):# system prompt
-     AI = LLM(model_name, system_message)
-     AI.chat()
+ # Read the system message from the file
+ with open('system.txt', 'r') as file:
+     system_message = file.read()
+
+ # Initialize the LLM class with the model name and system message
+ llm = LLM(model="microsoft/WizardLM-2-8x22B", system_message=system_message)
+
+ while True:
+     # Get the user input
+     user_input = input("User: ")
+
+     # Define the messages to be sent
+     messages = [
+         {"role": "user", "content": user_input}
+     ]
+
+     # Use the mistral_chat method to get the response
+     response = llm.chat(messages)
+
+     # Print the response
+     print("AI: ", response)
+ ```
+ ### `LLM` with internet
+ ```python
+ from __future__ import annotations
+ from typing import List, Optional
+
+ from webscout import LLM
+ from webscout import WEBS
+ import warnings
+
+ system_message: str = (
+     "As AI, you possess internet access and are capable of executing real-time web searches based on user inputs. "
+     "You shall utilize this capability to enrich conversations, offer informed insights, and augment your ability to "
+     "respond accurately and thoroughly. However, refrain from stating 'You have provided a list of strings,' ensuring "
+     "seamless interactions with users. Embrace a responsive demeanor, harnessing available online resources to address "
+     "queries, share pertinent content, and facilitate meaningful exchanges. By doing so, you create value through "
+     "connection and engagement, ultimately enhancing overall user satisfaction and experience. Additionally, "
+     "continue upholding the principles of respect, impartiality, and intellectual integrity throughout all interactions."
+ )
+
+ # Ignore the specific UserWarning
+ warnings.filterwarnings("ignore", category=UserWarning, module="curl_cffi.aio", lineno=205)
+ LLM = LLM(model="meta-llama/Meta-Llama-3-70B-Instruct", system_message=system_message)
+
+
+ def chat(
+     user_input: str, webs: WEBS, max_results: int = 10
+ ) -> Optional[str]:
+     """
+     Chat function to perform a web search based on the user input and generate a response using the LLM model.
+
+     Parameters
+     ----------
+     user_input : str
+         The user input to be used for the web search
+     webs : WEBS
+         The web search instance to be used to perform the search
+     max_results : int, optional
+         The maximum number of search results to include in the response, by default 10
+
+     Returns
+     -------
+     Optional[str]
+         The response generated by the LLM model, or None if there is no response
+     """
+     # Perform a web search based on the user input
+     search_results: List[str] = []
+     for r in webs.text(
+         user_input, region="wt-wt", safesearch="off", timelimit="y", max_results=max_results
+     ):
+         search_results.append(str(r)) # Convert each result to a string
+
+     # Define the messages to be sent, including the user input, search results, and system message
+     messages = [
+         {"role": "user", "content": user_input + "\n" + "websearch results are:" + "\n".join(search_results)},
+     ]
+
+     # Use the chat method to get the response
+     response = LLM.chat(messages)
+
+     return response
+

  if __name__ == "__main__":
-     model_name = "mistralai/Mistral-7B-Instruct-v0.2" # name of the model you wish to use It supports ALL text generation models on deepinfra.com.
-     chat(model_name)
+     while True:
+         # Get the user input
+         user_input = input("User: ")
+
+         # Perform a web search based on the user input
+         with WEBS() as webs:
+             response = chat(user_input, webs)
+
+         # Print the response
+         if response:
+             print("AI:", response)
+         else:
+             print("No response")
  ```

{webscout-1.2.9 → webscout-1.3.1}/webscout.egg-info/SOURCES.txt
@@ -27,6 +27,7 @@ webscout/models.py
  webscout/transcriber.py
  webscout/utils.py
  webscout/version.py
+ webscout/voice.py
  webscout/webscout_search.py
  webscout/webscout_search_async.py
  webscout.egg-info/PKG-INFO

webscout-1.2.9/webscout/version.py
@@ -1,2 +0,0 @@
- __version__ = "1.2.9"
-