webscout 1.3.0__tar.gz → 1.3.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; see the registry's advisory page for more details.

Files changed (41)
  1. {webscout-1.3.0 → webscout-1.3.1}/PKG-INFO +99 -7
  2. {webscout-1.3.0 → webscout-1.3.1}/README.md +98 -6
  3. {webscout-1.3.0 → webscout-1.3.1}/setup.py +2 -6
  4. {webscout-1.3.0 → webscout-1.3.1}/webscout/LLM.py +1 -24
  5. {webscout-1.3.0 → webscout-1.3.1}/webscout/__init__.py +1 -0
  6. webscout-1.3.1/webscout/version.py +2 -0
  7. {webscout-1.3.0 → webscout-1.3.1}/webscout.egg-info/PKG-INFO +99 -7
  8. webscout-1.3.0/webscout/version.py +0 -2
  9. {webscout-1.3.0 → webscout-1.3.1}/DeepWEBS/__init__.py +0 -0
  10. {webscout-1.3.0 → webscout-1.3.1}/DeepWEBS/documents/__init__.py +0 -0
  11. {webscout-1.3.0 → webscout-1.3.1}/DeepWEBS/documents/query_results_extractor.py +0 -0
  12. {webscout-1.3.0 → webscout-1.3.1}/DeepWEBS/documents/webpage_content_extractor.py +0 -0
  13. {webscout-1.3.0 → webscout-1.3.1}/DeepWEBS/networks/__init__.py +0 -0
  14. {webscout-1.3.0 → webscout-1.3.1}/DeepWEBS/networks/filepath_converter.py +0 -0
  15. {webscout-1.3.0 → webscout-1.3.1}/DeepWEBS/networks/google_searcher.py +0 -0
  16. {webscout-1.3.0 → webscout-1.3.1}/DeepWEBS/networks/network_configs.py +0 -0
  17. {webscout-1.3.0 → webscout-1.3.1}/DeepWEBS/networks/webpage_fetcher.py +0 -0
  18. {webscout-1.3.0 → webscout-1.3.1}/DeepWEBS/utilsdw/__init__.py +0 -0
  19. {webscout-1.3.0 → webscout-1.3.1}/DeepWEBS/utilsdw/enver.py +0 -0
  20. {webscout-1.3.0 → webscout-1.3.1}/DeepWEBS/utilsdw/logger.py +0 -0
  21. {webscout-1.3.0 → webscout-1.3.1}/LICENSE.md +0 -0
  22. {webscout-1.3.0 → webscout-1.3.1}/setup.cfg +0 -0
  23. {webscout-1.3.0 → webscout-1.3.1}/webscout/AI.py +0 -0
  24. {webscout-1.3.0 → webscout-1.3.1}/webscout/AIbase.py +0 -0
  25. {webscout-1.3.0 → webscout-1.3.1}/webscout/AIutel.py +0 -0
  26. {webscout-1.3.0 → webscout-1.3.1}/webscout/DWEBS.py +0 -0
  27. {webscout-1.3.0 → webscout-1.3.1}/webscout/HelpingAI.py +0 -0
  28. {webscout-1.3.0 → webscout-1.3.1}/webscout/__main__.py +0 -0
  29. {webscout-1.3.0 → webscout-1.3.1}/webscout/cli.py +0 -0
  30. {webscout-1.3.0 → webscout-1.3.1}/webscout/exceptions.py +0 -0
  31. {webscout-1.3.0 → webscout-1.3.1}/webscout/models.py +0 -0
  32. {webscout-1.3.0 → webscout-1.3.1}/webscout/transcriber.py +0 -0
  33. {webscout-1.3.0 → webscout-1.3.1}/webscout/utils.py +0 -0
  34. {webscout-1.3.0 → webscout-1.3.1}/webscout/voice.py +0 -0
  35. {webscout-1.3.0 → webscout-1.3.1}/webscout/webscout_search.py +0 -0
  36. {webscout-1.3.0 → webscout-1.3.1}/webscout/webscout_search_async.py +0 -0
  37. {webscout-1.3.0 → webscout-1.3.1}/webscout.egg-info/SOURCES.txt +0 -0
  38. {webscout-1.3.0 → webscout-1.3.1}/webscout.egg-info/dependency_links.txt +0 -0
  39. {webscout-1.3.0 → webscout-1.3.1}/webscout.egg-info/entry_points.txt +0 -0
  40. {webscout-1.3.0 → webscout-1.3.1}/webscout.egg-info/requires.txt +0 -0
  41. {webscout-1.3.0 → webscout-1.3.1}/webscout.egg-info/top_level.txt +0 -0
@@ -1,7 +1,7 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: webscout
3
- Version: 1.3.0
4
- Summary: Search for words, documents, images, videos, news, maps and text translation using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models and now can transcribe yt videos
3
+ Version: 1.3.1
4
+ Summary: Search for words, documents, images, videos, news, maps and text translation using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models, can transcribe yt videos and have TTS support
5
5
  Author: OEvortex
6
6
  Author-email: helpingai5@gmail.com
7
7
  License: HelpingAI Simplified Universal License
@@ -99,6 +99,7 @@ Search for words, documents, images, videos, news, maps and text translation usi
99
99
  - [9. `KOBOLDIA` -](#9-koboldia--)
100
100
  - [usage of special .LLM file from webscout (webscout.LLM)](#usage-of-special-llm-file-from-webscout-webscoutllm)
101
101
  - [`LLM`](#llm)
102
+ - [`LLM` with internet](#llm-with-internet)
102
103
 
103
104
  ## Install
104
105
  ```python
@@ -704,11 +705,102 @@ print(message)
704
705
  ```python
705
706
  from webscout.LLM import LLM
706
707
 
707
- def chat(model_name, system_message="You are Jarvis"):# system prompt
708
- AI = LLM(model_name, system_message)
709
- AI.chat()
708
+ # Read the system message from the file
709
+ with open('system.txt', 'r') as file:
710
+ system_message = file.read()
711
+
712
+ # Initialize the LLM class with the model name and system message
713
+ llm = LLM(model="microsoft/WizardLM-2-8x22B", system_message=system_message)
714
+
715
+ while True:
716
+ # Get the user input
717
+ user_input = input("User: ")
718
+
719
+ # Define the messages to be sent
720
+ messages = [
721
+ {"role": "user", "content": user_input}
722
+ ]
723
+
724
+ # Use the mistral_chat method to get the response
725
+ response = llm.chat(messages)
726
+
727
+ # Print the response
728
+ print("AI: ", response)
729
+ ```
730
+ ### `LLM` with internet
731
+ ```python
732
+ from __future__ import annotations
733
+ from typing import List, Optional
734
+
735
+ from webscout import LLM
736
+ from webscout import WEBS
737
+ import warnings
738
+
739
+ system_message: str = (
740
+ "As AI, you possess internet access and are capable of executing real-time web searches based on user inputs. "
741
+ "You shall utilize this capability to enrich conversations, offer informed insights, and augment your ability to "
742
+ "respond accurately and thoroughly. However, refrain from stating 'You have provided a list of strings,' ensuring "
743
+ "seamless interactions with users. Embrace a responsive demeanor, harnessing available online resources to address "
744
+ "queries, share pertinent content, and facilitate meaningful exchanges. By doing so, you create value through "
745
+ "connection and engagement, ultimately enhancing overall user satisfaction and experience. Additionally, "
746
+ "continue upholding the principles of respect, impartiality, and intellectual integrity throughout all interactions."
747
+ )
748
+
749
+ # Ignore the specific UserWarning
750
+ warnings.filterwarnings("ignore", category=UserWarning, module="curl_cffi.aio", lineno=205)
751
+ LLM = LLM(model="meta-llama/Meta-Llama-3-70B-Instruct", system_message=system_message)
752
+
753
+
754
+ def chat(
755
+ user_input: str, webs: WEBS, max_results: int = 10
756
+ ) -> Optional[str]:
757
+ """
758
+ Chat function to perform a web search based on the user input and generate a response using the LLM model.
759
+
760
+ Parameters
761
+ ----------
762
+ user_input : str
763
+ The user input to be used for the web search
764
+ webs : WEBS
765
+ The web search instance to be used to perform the search
766
+ max_results : int, optional
767
+ The maximum number of search results to include in the response, by default 10
768
+
769
+ Returns
770
+ -------
771
+ Optional[str]
772
+ The response generated by the LLM model, or None if there is no response
773
+ """
774
+ # Perform a web search based on the user input
775
+ search_results: List[str] = []
776
+ for r in webs.text(
777
+ user_input, region="wt-wt", safesearch="off", timelimit="y", max_results=max_results
778
+ ):
779
+ search_results.append(str(r)) # Convert each result to a string
780
+
781
+ # Define the messages to be sent, including the user input, search results, and system message
782
+ messages = [
783
+ {"role": "user", "content": user_input + "\n" + "websearch results are:" + "\n".join(search_results)},
784
+ ]
785
+
786
+ # Use the chat method to get the response
787
+ response = LLM.chat(messages)
788
+
789
+ return response
790
+
710
791
 
711
792
  if __name__ == "__main__":
712
- model_name = "mistralai/Mistral-7B-Instruct-v0.2" # name of the model you wish to use It supports ALL text generation models on deepinfra.com.
713
- chat(model_name)
793
+ while True:
794
+ # Get the user input
795
+ user_input = input("User: ")
796
+
797
+ # Perform a web search based on the user input
798
+ with WEBS() as webs:
799
+ response = chat(user_input, webs)
800
+
801
+ # Print the response
802
+ if response:
803
+ print("AI:", response)
804
+ else:
805
+ print("No response")
714
806
  ```
@@ -47,6 +47,7 @@ Search for words, documents, images, videos, news, maps and text translation usi
47
47
  - [9. `KOBOLDIA` -](#9-koboldia--)
48
48
  - [usage of special .LLM file from webscout (webscout.LLM)](#usage-of-special-llm-file-from-webscout-webscoutllm)
49
49
  - [`LLM`](#llm)
50
+ - [`LLM` with internet](#llm-with-internet)
50
51
 
51
52
  ## Install
52
53
  ```python
@@ -652,11 +653,102 @@ print(message)
652
653
  ```python
653
654
  from webscout.LLM import LLM
654
655
 
655
- def chat(model_name, system_message="You are Jarvis"):# system prompt
656
- AI = LLM(model_name, system_message)
657
- AI.chat()
656
+ # Read the system message from the file
657
+ with open('system.txt', 'r') as file:
658
+ system_message = file.read()
658
659
 
659
- if __name__ == "__main__":
660
- model_name = "mistralai/Mistral-7B-Instruct-v0.2" # name of the model you wish to use It supports ALL text generation models on deepinfra.com.
661
- chat(model_name)
660
+ # Initialize the LLM class with the model name and system message
661
+ llm = LLM(model="microsoft/WizardLM-2-8x22B", system_message=system_message)
662
+
663
+ while True:
664
+ # Get the user input
665
+ user_input = input("User: ")
666
+
667
+ # Define the messages to be sent
668
+ messages = [
669
+ {"role": "user", "content": user_input}
670
+ ]
671
+
672
+ # Use the mistral_chat method to get the response
673
+ response = llm.chat(messages)
674
+
675
+ # Print the response
676
+ print("AI: ", response)
662
677
  ```
678
+ ### `LLM` with internet
679
+ ```python
680
+ from __future__ import annotations
681
+ from typing import List, Optional
682
+
683
+ from webscout import LLM
684
+ from webscout import WEBS
685
+ import warnings
686
+
687
+ system_message: str = (
688
+ "As AI, you possess internet access and are capable of executing real-time web searches based on user inputs. "
689
+ "You shall utilize this capability to enrich conversations, offer informed insights, and augment your ability to "
690
+ "respond accurately and thoroughly. However, refrain from stating 'You have provided a list of strings,' ensuring "
691
+ "seamless interactions with users. Embrace a responsive demeanor, harnessing available online resources to address "
692
+ "queries, share pertinent content, and facilitate meaningful exchanges. By doing so, you create value through "
693
+ "connection and engagement, ultimately enhancing overall user satisfaction and experience. Additionally, "
694
+ "continue upholding the principles of respect, impartiality, and intellectual integrity throughout all interactions."
695
+ )
696
+
697
+ # Ignore the specific UserWarning
698
+ warnings.filterwarnings("ignore", category=UserWarning, module="curl_cffi.aio", lineno=205)
699
+ LLM = LLM(model="meta-llama/Meta-Llama-3-70B-Instruct", system_message=system_message)
700
+
701
+
702
+ def chat(
703
+ user_input: str, webs: WEBS, max_results: int = 10
704
+ ) -> Optional[str]:
705
+ """
706
+ Chat function to perform a web search based on the user input and generate a response using the LLM model.
707
+
708
+ Parameters
709
+ ----------
710
+ user_input : str
711
+ The user input to be used for the web search
712
+ webs : WEBS
713
+ The web search instance to be used to perform the search
714
+ max_results : int, optional
715
+ The maximum number of search results to include in the response, by default 10
716
+
717
+ Returns
718
+ -------
719
+ Optional[str]
720
+ The response generated by the LLM model, or None if there is no response
721
+ """
722
+ # Perform a web search based on the user input
723
+ search_results: List[str] = []
724
+ for r in webs.text(
725
+ user_input, region="wt-wt", safesearch="off", timelimit="y", max_results=max_results
726
+ ):
727
+ search_results.append(str(r)) # Convert each result to a string
728
+
729
+ # Define the messages to be sent, including the user input, search results, and system message
730
+ messages = [
731
+ {"role": "user", "content": user_input + "\n" + "websearch results are:" + "\n".join(search_results)},
732
+ ]
733
+
734
+ # Use the chat method to get the response
735
+ response = LLM.chat(messages)
736
+
737
+ return response
738
+
739
+
740
+ if __name__ == "__main__":
741
+ while True:
742
+ # Get the user input
743
+ user_input = input("User: ")
744
+
745
+ # Perform a web search based on the user input
746
+ with WEBS() as webs:
747
+ response = chat(user_input, webs)
748
+
749
+ # Print the response
750
+ if response:
751
+ print("AI:", response)
752
+ else:
753
+ print("No response")
754
+ ```
@@ -1,16 +1,12 @@
1
1
  from setuptools import setup, find_packages
2
2
 
3
- # version = None
4
- # with open("webscout/version.py") as version_file:
5
- # exec(version_file.read())
6
-
7
3
  with open("README.md", encoding="utf-8") as f:
8
4
  README = f.read()
9
5
 
10
6
  setup(
11
7
  name="webscout",
12
- version="1.3.0",
13
- description="Search for words, documents, images, videos, news, maps and text translation using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models and now can transcribe yt videos",
8
+ version="1.3.1",
9
+ description="Search for words, documents, images, videos, news, maps and text translation using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models, can transcribe yt videos and have TTS support",
14
10
  long_description=README,
15
11
  long_description_content_type="text/markdown",
16
12
  author="OEvortex",
@@ -8,7 +8,7 @@ class LLM:
8
8
  self.model = model
9
9
  self.conversation_history = [{"role": "system", "content": system_message}]
10
10
 
11
- def mistral_chat(self, messages: List[Dict[str, str]]) -> Union[str, None]:
11
+ def chat(self, messages: List[Dict[str, str]]) -> Union[str, None]:
12
12
  url = "https://api.deepinfra.com/v1/openai/chat/completions"
13
13
  headers = {
14
14
  'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
@@ -43,26 +43,3 @@ class LLM:
43
43
  return result.json()['choices'][0]['message']['content']
44
44
  except:
45
45
  return None
46
-
47
- def chat(self):
48
- while True:
49
- prompt = input("👦: ")
50
- user_message = {"role": "user", "content": prompt}
51
- self.conversation_history.append(user_message)
52
- try:
53
- resp = self.mistral_chat(self.conversation_history)
54
- print(f"🤖: {resp}")
55
- self.conversation_history.append({"role": "assistant", "content": resp})
56
- except Exception as e:
57
- print(f"🤖: Oops, something went wrong: {e}! Looks like even AI needs some oiling sometimes.")
58
-
59
- if __name__ == "__main__":
60
- parser = argparse.ArgumentParser(description='LLM CLI', epilog='To use a specific model, run:\n'
61
- 'python -m webscout.LLM model_name\n'
62
- 'Replace "model_name" with the name of the model you wish to use It supports ALL text generation models on deepinfra.com.')
63
- parser.add_argument('model', type=str, help='Model to use for text generation. Specify the full model name, e.g., "mistralai/Mistral-7B-Instruct-v0.1".')
64
- parser.add_argument('--system-message', type=str, default="You are a Helpful AI.", help='Custom system prompt for the AI.')
65
- args = parser.parse_args()
66
-
67
- LLM = LLM(args.model, args.system_message)
68
- LLM.chat()
@@ -11,6 +11,7 @@ from .version import __version__
11
11
  from .DWEBS import DeepWEBS
12
12
  from .transcriber import transcriber
13
13
  from .voice import play_audio
14
+ from .LLM import LLM
14
15
 
15
16
 
16
17
  __all__ = ["WEBS", "AsyncWEBS", "__version__", "cli"]
@@ -0,0 +1,2 @@
1
+ __version__ = "1.3.1"
2
+
@@ -1,7 +1,7 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: webscout
3
- Version: 1.3.0
4
- Summary: Search for words, documents, images, videos, news, maps and text translation using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models and now can transcribe yt videos
3
+ Version: 1.3.1
4
+ Summary: Search for words, documents, images, videos, news, maps and text translation using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models, can transcribe yt videos and have TTS support
5
5
  Author: OEvortex
6
6
  Author-email: helpingai5@gmail.com
7
7
  License: HelpingAI Simplified Universal License
@@ -99,6 +99,7 @@ Search for words, documents, images, videos, news, maps and text translation usi
99
99
  - [9. `KOBOLDIA` -](#9-koboldia--)
100
100
  - [usage of special .LLM file from webscout (webscout.LLM)](#usage-of-special-llm-file-from-webscout-webscoutllm)
101
101
  - [`LLM`](#llm)
102
+ - [`LLM` with internet](#llm-with-internet)
102
103
 
103
104
  ## Install
104
105
  ```python
@@ -704,11 +705,102 @@ print(message)
704
705
  ```python
705
706
  from webscout.LLM import LLM
706
707
 
707
- def chat(model_name, system_message="You are Jarvis"):# system prompt
708
- AI = LLM(model_name, system_message)
709
- AI.chat()
708
+ # Read the system message from the file
709
+ with open('system.txt', 'r') as file:
710
+ system_message = file.read()
711
+
712
+ # Initialize the LLM class with the model name and system message
713
+ llm = LLM(model="microsoft/WizardLM-2-8x22B", system_message=system_message)
714
+
715
+ while True:
716
+ # Get the user input
717
+ user_input = input("User: ")
718
+
719
+ # Define the messages to be sent
720
+ messages = [
721
+ {"role": "user", "content": user_input}
722
+ ]
723
+
724
+ # Use the mistral_chat method to get the response
725
+ response = llm.chat(messages)
726
+
727
+ # Print the response
728
+ print("AI: ", response)
729
+ ```
730
+ ### `LLM` with internet
731
+ ```python
732
+ from __future__ import annotations
733
+ from typing import List, Optional
734
+
735
+ from webscout import LLM
736
+ from webscout import WEBS
737
+ import warnings
738
+
739
+ system_message: str = (
740
+ "As AI, you possess internet access and are capable of executing real-time web searches based on user inputs. "
741
+ "You shall utilize this capability to enrich conversations, offer informed insights, and augment your ability to "
742
+ "respond accurately and thoroughly. However, refrain from stating 'You have provided a list of strings,' ensuring "
743
+ "seamless interactions with users. Embrace a responsive demeanor, harnessing available online resources to address "
744
+ "queries, share pertinent content, and facilitate meaningful exchanges. By doing so, you create value through "
745
+ "connection and engagement, ultimately enhancing overall user satisfaction and experience. Additionally, "
746
+ "continue upholding the principles of respect, impartiality, and intellectual integrity throughout all interactions."
747
+ )
748
+
749
+ # Ignore the specific UserWarning
750
+ warnings.filterwarnings("ignore", category=UserWarning, module="curl_cffi.aio", lineno=205)
751
+ LLM = LLM(model="meta-llama/Meta-Llama-3-70B-Instruct", system_message=system_message)
752
+
753
+
754
+ def chat(
755
+ user_input: str, webs: WEBS, max_results: int = 10
756
+ ) -> Optional[str]:
757
+ """
758
+ Chat function to perform a web search based on the user input and generate a response using the LLM model.
759
+
760
+ Parameters
761
+ ----------
762
+ user_input : str
763
+ The user input to be used for the web search
764
+ webs : WEBS
765
+ The web search instance to be used to perform the search
766
+ max_results : int, optional
767
+ The maximum number of search results to include in the response, by default 10
768
+
769
+ Returns
770
+ -------
771
+ Optional[str]
772
+ The response generated by the LLM model, or None if there is no response
773
+ """
774
+ # Perform a web search based on the user input
775
+ search_results: List[str] = []
776
+ for r in webs.text(
777
+ user_input, region="wt-wt", safesearch="off", timelimit="y", max_results=max_results
778
+ ):
779
+ search_results.append(str(r)) # Convert each result to a string
780
+
781
+ # Define the messages to be sent, including the user input, search results, and system message
782
+ messages = [
783
+ {"role": "user", "content": user_input + "\n" + "websearch results are:" + "\n".join(search_results)},
784
+ ]
785
+
786
+ # Use the chat method to get the response
787
+ response = LLM.chat(messages)
788
+
789
+ return response
790
+
710
791
 
711
792
  if __name__ == "__main__":
712
- model_name = "mistralai/Mistral-7B-Instruct-v0.2" # name of the model you wish to use It supports ALL text generation models on deepinfra.com.
713
- chat(model_name)
793
+ while True:
794
+ # Get the user input
795
+ user_input = input("User: ")
796
+
797
+ # Perform a web search based on the user input
798
+ with WEBS() as webs:
799
+ response = chat(user_input, webs)
800
+
801
+ # Print the response
802
+ if response:
803
+ print("AI:", response)
804
+ else:
805
+ print("No response")
714
806
  ```
@@ -1,2 +0,0 @@
1
- __version__ = "1.3.0"
2
-
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes