webscout 1.3.4__tar.gz → 1.3.6__tar.gz

This diff shows the changes between publicly available package versions as released to one of the supported registries. It is provided for informational purposes only.

Potentially problematic release: this version of webscout might be problematic.

Files changed (43)
  1. {webscout-1.3.4 → webscout-1.3.6}/PKG-INFO +28 -9
  2. {webscout-1.3.4 → webscout-1.3.6}/README.md +27 -8
  3. {webscout-1.3.4 → webscout-1.3.6}/setup.py +1 -1
  4. {webscout-1.3.4 → webscout-1.3.6}/webscout/AI.py +641 -10
  5. {webscout-1.3.4 → webscout-1.3.6}/webscout/AIutel.py +3 -0
  6. {webscout-1.3.4 → webscout-1.3.6}/webscout/__init__.py +3 -0
  7. webscout-1.3.6/webscout/version.py +2 -0
  8. {webscout-1.3.4 → webscout-1.3.6}/webscout/webai.py +65 -3
  9. {webscout-1.3.4 → webscout-1.3.6}/webscout.egg-info/PKG-INFO +28 -9
  10. webscout-1.3.4/webscout/version.py +0 -2
  11. {webscout-1.3.4 → webscout-1.3.6}/DeepWEBS/__init__.py +0 -0
  12. {webscout-1.3.4 → webscout-1.3.6}/DeepWEBS/documents/__init__.py +0 -0
  13. {webscout-1.3.4 → webscout-1.3.6}/DeepWEBS/documents/query_results_extractor.py +0 -0
  14. {webscout-1.3.4 → webscout-1.3.6}/DeepWEBS/documents/webpage_content_extractor.py +0 -0
  15. {webscout-1.3.4 → webscout-1.3.6}/DeepWEBS/networks/__init__.py +0 -0
  16. {webscout-1.3.4 → webscout-1.3.6}/DeepWEBS/networks/filepath_converter.py +0 -0
  17. {webscout-1.3.4 → webscout-1.3.6}/DeepWEBS/networks/google_searcher.py +0 -0
  18. {webscout-1.3.4 → webscout-1.3.6}/DeepWEBS/networks/network_configs.py +0 -0
  19. {webscout-1.3.4 → webscout-1.3.6}/DeepWEBS/networks/webpage_fetcher.py +0 -0
  20. {webscout-1.3.4 → webscout-1.3.6}/DeepWEBS/utilsdw/__init__.py +0 -0
  21. {webscout-1.3.4 → webscout-1.3.6}/DeepWEBS/utilsdw/enver.py +0 -0
  22. {webscout-1.3.4 → webscout-1.3.6}/DeepWEBS/utilsdw/logger.py +0 -0
  23. {webscout-1.3.4 → webscout-1.3.6}/LICENSE.md +0 -0
  24. {webscout-1.3.4 → webscout-1.3.6}/setup.cfg +0 -0
  25. {webscout-1.3.4 → webscout-1.3.6}/webscout/AIbase.py +0 -0
  26. {webscout-1.3.4 → webscout-1.3.6}/webscout/DWEBS.py +0 -0
  27. {webscout-1.3.4 → webscout-1.3.6}/webscout/HelpingAI.py +0 -0
  28. {webscout-1.3.4 → webscout-1.3.6}/webscout/LLM.py +0 -0
  29. {webscout-1.3.4 → webscout-1.3.6}/webscout/__main__.py +0 -0
  30. {webscout-1.3.4 → webscout-1.3.6}/webscout/cli.py +0 -0
  31. {webscout-1.3.4 → webscout-1.3.6}/webscout/exceptions.py +0 -0
  32. {webscout-1.3.4 → webscout-1.3.6}/webscout/g4f.py +0 -0
  33. {webscout-1.3.4 → webscout-1.3.6}/webscout/models.py +0 -0
  34. {webscout-1.3.4 → webscout-1.3.6}/webscout/transcriber.py +0 -0
  35. {webscout-1.3.4 → webscout-1.3.6}/webscout/utils.py +0 -0
  36. {webscout-1.3.4 → webscout-1.3.6}/webscout/voice.py +0 -0
  37. {webscout-1.3.4 → webscout-1.3.6}/webscout/webscout_search.py +0 -0
  38. {webscout-1.3.4 → webscout-1.3.6}/webscout/webscout_search_async.py +0 -0
  39. {webscout-1.3.4 → webscout-1.3.6}/webscout.egg-info/SOURCES.txt +0 -0
  40. {webscout-1.3.4 → webscout-1.3.6}/webscout.egg-info/dependency_links.txt +0 -0
  41. {webscout-1.3.4 → webscout-1.3.6}/webscout.egg-info/entry_points.txt +0 -0
  42. {webscout-1.3.4 → webscout-1.3.6}/webscout.egg-info/requires.txt +0 -0
  43. {webscout-1.3.4 → webscout-1.3.6}/webscout.egg-info/top_level.txt +0 -0

{webscout-1.3.4 → webscout-1.3.6}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: webscout
- Version: 1.3.4
+ Version: 1.3.6
  Summary: Search for anything using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models, can transcribe yt videos, have TTS support and now has webai(terminal gpt and open interpeter) support
  Author: OEvortex
  Author-email: helpingai5@gmail.com
@@ -97,11 +97,12 @@ Search for anything using the Google, DuckDuckGo.com, yep.com, phind.com, you.co
  - [8. `OpenGPT` - chat With OPENGPT](#8-opengpt---chat-with-opengpt)
  - [9. `KOBOLDIA` -](#9-koboldia--)
  - [10. `Sean` - chat With Sean](#10-sean---chat-with-sean)
+ - [11. `Reka` - chat with reka](#11-reka---chat-with-reka)
+ - [12. `Cohere` - chat with cohere](#12-cohere---chat-with-cohere)
  - [usage of special .LLM file from webscout (webscout.LLM)](#usage-of-special-llm-file-from-webscout-webscoutllm)
  - [`LLM`](#llm)
  - [`LLM` with internet](#llm-with-internet)
  - [`Webai` - terminal gpt and a open interpeter](#webai---terminal-gpt-and-a-open-interpeter)
- - [for using as terminal gpt](#for-using-as-terminal-gpt)

  ## Install
  ```python
@@ -698,6 +699,27 @@ response_str = a.chat(prompt)
  print(response_str)
  ```

+ ### 11. `Reka` - chat with reka
+ ```python
+ from webscout.AI import REKA
+
+ a = REKA(is_conversation=True, max_tokens=8000, timeout=30,api_key="")
+
+ prompt = "tell me about india"
+ response_str = a.chat(prompt)
+ print(response_str)
+ ```
+
+ ### 12. `Cohere` - chat with cohere
+ ```python
+ from webscout.AI import Cohere
+
+ a = Cohere(is_conversation=True, max_tokens=8000, timeout=30,api_key="")
+
+ prompt = "tell me about india"
+ response_str = a.chat(prompt)
+ print(response_str)
+ ```
  ## usage of special .LLM file from webscout (webscout.LLM)

  ### `LLM`
@@ -816,12 +838,12 @@ def use_rawdog_with_webai(prompt):
  try:
  webai_bot = Main(
  max_tokens=500,
- provider="phind",
+ provider="cohere",
  temperature=0.7,
  top_k=40,
  top_p=0.95,
- model="Phind Model", # Replace with your desired model
- auth=None, # Replace with your auth key/value (if needed)
+ model="command-r-plus", # Replace with your desired model
+ auth="0zoQbKs1AAgd8WrPBO9CTIGgVvm5ZMbDcCqJOVyl", # Replace with your auth key/value (if needed)
  timeout=30,
  disable_conversation=True,
  filepath=None,
@@ -841,11 +863,8 @@ def use_rawdog_with_webai(prompt):
  if __name__ == "__main__":
  user_prompt = input("Enter your prompt: ")
  use_rawdog_with_webai(user_prompt)
+
  ```
  ```shell
  python -m webscout.webai webai --provider "phind" --rawdog
  ```
- ### for using as terminal gpt
- ```python
- python -m webscout.webai webai --provider "sean"
- ```

{webscout-1.3.4 → webscout-1.3.6}/README.md
@@ -45,11 +45,12 @@ Search for anything using the Google, DuckDuckGo.com, yep.com, phind.com, you.co
  - [8. `OpenGPT` - chat With OPENGPT](#8-opengpt---chat-with-opengpt)
  - [9. `KOBOLDIA` -](#9-koboldia--)
  - [10. `Sean` - chat With Sean](#10-sean---chat-with-sean)
+ - [11. `Reka` - chat with reka](#11-reka---chat-with-reka)
+ - [12. `Cohere` - chat with cohere](#12-cohere---chat-with-cohere)
  - [usage of special .LLM file from webscout (webscout.LLM)](#usage-of-special-llm-file-from-webscout-webscoutllm)
  - [`LLM`](#llm)
  - [`LLM` with internet](#llm-with-internet)
  - [`Webai` - terminal gpt and a open interpeter](#webai---terminal-gpt-and-a-open-interpeter)
- - [for using as terminal gpt](#for-using-as-terminal-gpt)

  ## Install
  ```python
@@ -646,6 +647,27 @@ response_str = a.chat(prompt)
  print(response_str)
  ```

+ ### 11. `Reka` - chat with reka
+ ```python
+ from webscout.AI import REKA
+
+ a = REKA(is_conversation=True, max_tokens=8000, timeout=30,api_key="")
+
+ prompt = "tell me about india"
+ response_str = a.chat(prompt)
+ print(response_str)
+ ```
+
+ ### 12. `Cohere` - chat with cohere
+ ```python
+ from webscout.AI import Cohere
+
+ a = Cohere(is_conversation=True, max_tokens=8000, timeout=30,api_key="")
+
+ prompt = "tell me about india"
+ response_str = a.chat(prompt)
+ print(response_str)
+ ```
  ## usage of special .LLM file from webscout (webscout.LLM)

  ### `LLM`
@@ -764,12 +786,12 @@ def use_rawdog_with_webai(prompt):
  try:
  webai_bot = Main(
  max_tokens=500,
- provider="phind",
+ provider="cohere",
  temperature=0.7,
  top_k=40,
  top_p=0.95,
- model="Phind Model", # Replace with your desired model
- auth=None, # Replace with your auth key/value (if needed)
+ model="command-r-plus", # Replace with your desired model
+ auth="0zoQbKs1AAgd8WrPBO9CTIGgVvm5ZMbDcCqJOVyl", # Replace with your auth key/value (if needed)
  timeout=30,
  disable_conversation=True,
  filepath=None,
@@ -789,11 +811,8 @@ def use_rawdog_with_webai(prompt):
  if __name__ == "__main__":
  user_prompt = input("Enter your prompt: ")
  use_rawdog_with_webai(user_prompt)
+
  ```
  ```shell
  python -m webscout.webai webai --provider "phind" --rawdog
  ```
- ### for using as terminal gpt
- ```python
- python -m webscout.webai webai --provider "sean"
- ```

{webscout-1.3.4 → webscout-1.3.6}/setup.py
@@ -5,7 +5,7 @@ with open("README.md", encoding="utf-8") as f:

  setup(
  name="webscout",
- version="1.3.4",
+ version="1.3.6",
  description="Search for anything using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models, can transcribe yt videos, have TTS support and now has webai(terminal gpt and open interpeter) support",
  long_description=README,
  long_description_content_type="text/markdown",

{webscout-1.3.4 → webscout-1.3.6}/webscout/AI.py
@@ -25,8 +25,639 @@ from webscout.AIbase import Provider
  from Helpingai_T2 import Perplexity
  from typing import Any
  import logging
- #----------------------------------------------------------Sean-----------------------------------------------------------
- class Sean:
+ #-----------------------------------------------Cohere--------------------------------------------
+ class Cohere(Provider):
+ def __init__(
+ self,
+ api_key: str,
+ is_conversation: bool = True,
+ max_tokens: int = 600,
+ model: str = "command-r-plus",
+ temperature: float = 0.7,
+ system_prompt: str = "You are helpful AI",
+ timeout: int = 30,
+ intro: str = None,
+ filepath: str = None,
+ update_file: bool = True,
+ proxies: dict = {},
+ history_offset: int = 10250,
+ act: str = None,
+ top_k: int = -1,
+ top_p: float = 0.999,
+ ):
+ """Initializes Cohere
+
+ Args:
+ api_key (str): Cohere API key.
+ is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+ max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+ model (str, optional): Model to use for generating text. Defaults to "command-r-plus".
+ temperature (float, optional): Diversity of the generated text. Higher values produce more diverse outputs.
+ Defaults to 0.7.
+ system_prompt (str, optional): A system_prompt or context to set the style or tone of the generated text.
+ Defaults to "You are helpful AI".
+ timeout (int, optional): Http request timeout. Defaults to 30.
+ intro (str, optional): Conversation introductory prompt. Defaults to None.
+ filepath (str, optional): Path to file containing conversation history. Defaults to None.
+ update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+ proxies (dict, optional): Http request proxies. Defaults to {}.
+ history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+ act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+ """
+ self.session = requests.Session()
+ self.is_conversation = is_conversation
+ self.max_tokens_to_sample = max_tokens
+ self.api_key = api_key
+ self.model = model
+ self.temperature = temperature
+ self.system_prompt = system_prompt
+ self.chat_endpoint = "https://production.api.os.cohere.ai/coral/v1/chat"
+ self.stream_chunk_size = 64
+ self.timeout = timeout
+ self.last_response = {}
+ self.headers = {
+ "Content-Type": "application/json",
+ "Authorization": f"Bearer {self.api_key}",
+ }
+
+ self.__available_optimizers = (
+ method
+ for method in dir(Optimizers)
+ if callable(getattr(Optimizers, method)) and not method.startswith("__")
+ )
+ self.session.headers.update(self.headers)
+ Conversation.intro = (
+ AwesomePrompts().get_act(
+ act, raise_not_found=True, default=None, case_insensitive=True
+ )
+ if act
+ else intro or Conversation.intro
+ )
+ self.conversation = Conversation(
+ is_conversation, self.max_tokens_to_sample, filepath, update_file
+ )
+ self.conversation.history_offset = history_offset
+ self.session.proxies = proxies
+
+ def ask(
+ self,
+ prompt: str,
+ stream: bool = False,
+ raw: bool = False,
+ optimizer: str = None,
+ conversationally: bool = False,
+ ) -> dict:
+ """Chat with AI
+
+ Args:
+ prompt (str): Prompt to be send.
+ stream (bool, optional): Flag for streaming response. Defaults to False.
+ raw (bool, optional): Stream back raw response as received. Defaults to False.
+ optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+ conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+ Returns:
+ dict : {}
+ ```json
+ {
+ "text" : "How may I assist you today?"
+ }
+ ```
+ """
+ conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+ if optimizer:
+ if optimizer in self.__available_optimizers:
+ conversation_prompt = getattr(Optimizers, optimizer)(
+ conversation_prompt if conversationally else prompt
+ )
+ else:
+ raise Exception(
+ f"Optimizer is not one of {self.__available_optimizers}"
+ )
+ self.session.headers.update(self.headers)
+ payload = {
+ "message": conversation_prompt,
+ "model": self.model,
+ "temperature": self.temperature,
+ "preamble": self.system_prompt,
+ }
+
+ def for_stream():
+ response = self.session.post(
+ self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
+ )
+ if not response.ok:
+ raise Exception(
+ f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+ )
+
+ for value in response.iter_lines(
+ decode_unicode=True,
+ chunk_size=self.stream_chunk_size,
+ ):
+ try:
+ resp = json.loads(value.strip().split("\n")[-1])
+ self.last_response.update(resp)
+ yield value if raw else resp
+ except json.decoder.JSONDecodeError:
+ pass
+ self.conversation.update_chat_history(
+ prompt, self.get_message(self.last_response)
+ )
+
+ def for_non_stream():
+ # let's make use of stream
+ for _ in for_stream():
+ pass
+ return self.last_response
+
+ return for_stream() if stream else for_non_stream()
+
+ def chat(
+ self,
+ prompt: str,
+ stream: bool = False,
+ optimizer: str = None,
+ conversationally: bool = False,
+ ) -> str:
+ """Generate response `str`
+ Args:
+ prompt (str): Prompt to be send.
+ stream (bool, optional): Flag for streaming response. Defaults to False.
+ optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+ conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+ Returns:
+ str: Response generated
+ """
+
+ def for_stream():
+ for response in self.ask(
+ prompt, True, optimizer=optimizer, conversationally=conversationally
+ ):
+ yield self.get_message(response)
+
+ def for_non_stream():
+ return self.get_message(
+ self.ask(
+ prompt,
+ False,
+ optimizer=optimizer,
+ conversationally=conversationally,
+ )
+ )
+
+ return for_stream() if stream else for_non_stream()
+
+ def get_message(self, response: dict) -> str:
+ """Retrieves message only from response
+
+ Args:
+ response (dict): Response generated by `self.ask`
+
+ Returns:
+ str: Message extracted
+ """
+ assert isinstance(response, dict), "Response should be of dict data-type only"
+ return response["result"]["chatStreamEndEvent"]["response"]["text"]
+ #-----------------------------------------------REKA-----------------------------------------------
+ class REKA(Provider):
+ def __init__(
+ self,
+ api_key: str,
+ is_conversation: bool = True,
+ max_tokens: int = 600,
+ timeout: int = 30,
+ intro: str = None,
+ filepath: str = None,
+ update_file: bool = True,
+ proxies: dict = {},
+ history_offset: int = 10250,
+ act: str = None,
+ model: str = "reka-core",
+ system_prompt: str = "Be Helpful and Friendly. Keep your response straightforward, short and concise",
+ use_search_engine: bool = False,
+ use_code_interpreter: bool = False,
+ ):
+ """Instantiates REKA
+
+ Args:
+ is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True
+ max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+ timeout (int, optional): Http request timeout. Defaults to 30.
+ intro (str, optional): Conversation introductory prompt. Defaults to None.
+ filepath (str, optional): Path to file containing conversation history. Defaults to None.
+ update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+ proxies (dict, optional): Http request proxies. Defaults to {}.
+ history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+ act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+ model (str, optional): REKA model name. Defaults to "reka-core".
+ system_prompt (str, optional): System prompt for REKA. Defaults to "Be Helpful and Friendly. Keep your response straightforward, short and concise".
+ use_search_engine (bool, optional): Whether to use the search engine. Defaults to False.
+ use_code_interpreter (bool, optional): Whether to use the code interpreter. Defaults to False.
+ """
+ self.session = requests.Session()
+ self.is_conversation = is_conversation
+ self.max_tokens_to_sample = max_tokens
+ self.api_endpoint = "https://chat.reka.ai/api/chat"
+ self.stream_chunk_size = 64
+ self.timeout = timeout
+ self.last_response = {}
+ self.model = model
+ self.system_prompt = system_prompt
+ self.use_search_engine = use_search_engine
+ self.use_code_interpreter = use_code_interpreter
+ self.access_token = api_key
+ self.headers = {
+ "Authorization": f"Bearer {self.access_token}",
+ }
+
+ self.__available_optimizers = (
+ method
+ for method in dir(Optimizers)
+ if callable(getattr(Optimizers, method)) and not method.startswith("__")
+ )
+ self.session.headers.update(self.headers)
+ Conversation.intro = (
+ AwesomePrompts().get_act(
+ act, raise_not_found=True, default=None, case_insensitive=True
+ )
+ if act
+ else intro or Conversation.intro
+ )
+ self.conversation = Conversation(
+ is_conversation, self.max_tokens_to_sample, filepath, update_file
+ )
+ self.conversation.history_offset = history_offset
+ self.session.proxies = proxies
+
+ def ask(
+ self,
+ prompt: str,
+ stream: bool = False,
+ raw: bool = False,
+ optimizer: str = None,
+ conversationally: bool = False,
+ ) -> dict:
+ """Chat with AI
+
+ Args:
+ prompt (str): Prompt to be send.
+ stream (bool, optional): Flag for streaming response. Defaults to False.
+ raw (bool, optional): Stream back raw response as received. Defaults to False.
+ optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+ conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+ Returns:
+ dict : {}
+ ```json
+ {
+ "text" : "How may I assist you today?"
+ }
+ ```
+ """
+ conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+ if optimizer:
+ if optimizer in self.__available_optimizers:
+ conversation_prompt = getattr(Optimizers, optimizer)(
+ conversation_prompt if conversationally else prompt
+ )
+ else:
+ raise Exception(
+ f"Optimizer is not one of {self.__available_optimizers}"
+ )
+
+ self.session.headers.update(self.headers)
+ payload = {
+
+ "conversation_history": [
+ {"type": "human", "text": f"## SYSTEM PROMPT: {self.system_prompt}\n\n## QUERY: {conversation_prompt}"},
+ ],
+
+ "stream": stream,
+ "use_search_engine": self.use_search_engine,
+ "use_code_interpreter": self.use_code_interpreter,
+ "model_name": self.model,
+ # "model_name": "reka-flash",
+ # "model_name": "reka-edge",
+ }
+
+ def for_stream():
+ response = self.session.post(self.api_endpoint, json=payload, stream=True, timeout=self.timeout)
+ if not response.ok:
+ raise Exception(
+ f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+ )
+
+ for value in response.iter_lines(
+ decode_unicode=True,
+ chunk_size=self.stream_chunk_size,
+ ):
+ try:
+ resp = json.loads(value)
+ self.last_response.update(resp)
+ yield value if raw else resp
+ except json.decoder.JSONDecodeError:
+ pass
+ self.conversation.update_chat_history(
+ prompt, self.get_message(self.last_response)
+ )
+
+ def for_non_stream():
+ # let's make use of stream
+ for _ in for_stream():
+ pass
+ return self.last_response
+
+ return for_stream() if stream else for_non_stream()
+
+ def chat(
+ self,
+ prompt: str,
+ stream: bool = False,
+ optimizer: str = None,
+ conversationally: bool = False,
+ ) -> str:
+ """Generate response `str`
+ Args:
+ prompt (str): Prompt to be send.
+ stream (bool, optional): Flag for streaming response. Defaults to False.
+ optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+ conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+ Returns:
+ str: Response generated
+ """
+
+ def for_stream():
+ for response in self.ask(
+ prompt, True, optimizer=optimizer, conversationally=conversationally
+ ):
+ yield self.get_message(response)
+
+ def for_non_stream():
+ return self.get_message(
+ self.ask(
+ prompt,
+ False,
+ optimizer=optimizer,
+ conversationally=conversationally,
+ )
+ )
+
+ return for_stream() if stream else for_non_stream()
+
+ def get_message(self, response: dict) -> str:
+ """Retrieves message only from response
+
+ Args:
+ response (dict): Response generated by `self.ask`
+
+ Returns:
+ str: Message extracted
+ """
+ assert isinstance(response, dict), "Response should be of dict data-type only"
+ return response.get("text")
+ #-----------------------------------------------GROQ-----------------------------------------------
+ class GROQ(Provider):
+ def __init__(
+ self,
+ api_key: str,
+ is_conversation: bool = True,
+ max_tokens: int = 600,
+ temperature: float = 1,
+ presence_penalty: int = 0,
+ frequency_penalty: int = 0,
+ top_p: float = 1,
+ model: str = "mixtral-8x7b-32768",
+ timeout: int = 30,
+ intro: str = None,
+ filepath: str = None,
+ update_file: bool = True,
+ proxies: dict = {},
+ history_offset: int = 10250,
+ act: str = None,
+ ):
+ """Instantiates GROQ
+
+ Args:
+ api_key (key): GROQ's API key.
+ is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+ max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+ temperature (float, optional): Charge of the generated text's randomness. Defaults to 1.
+ presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0.
+ frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0.
+ top_p (float, optional): Sampling threshold during inference time. Defaults to 0.999.
+ model (str, optional): LLM model name. Defaults to "gpt-3.5-turbo".
+ timeout (int, optional): Http request timeout. Defaults to 30.
+ intro (str, optional): Conversation introductory prompt. Defaults to None.
+ filepath (str, optional): Path to file containing conversation history. Defaults to None.
+ update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+ proxies (dict, optional): Http request proxies. Defaults to {}.
+ history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+ act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+ """
+ self.session = requests.Session()
+ self.is_conversation = is_conversation
+ self.max_tokens_to_sample = max_tokens
+ self.api_key = api_key
+ self.model = model
+ self.temperature = temperature
+ self.presence_penalty = presence_penalty
+ self.frequency_penalty = frequency_penalty
+ self.top_p = top_p
+ self.chat_endpoint = "https://api.groq.com/openai/v1/chat/completions"
+ self.stream_chunk_size = 64
+ self.timeout = timeout
+ self.last_response = {}
+ self.headers = {
+ "Content-Type": "application/json",
+ "Authorization": f"Bearer {self.api_key}",
+ }
+
+ self.__available_optimizers = (
+ method
+ for method in dir(Optimizers)
+ if callable(getattr(Optimizers, method)) and not method.startswith("__")
+ )
+ self.session.headers.update(self.headers)
+ Conversation.intro = (
+ AwesomePrompts().get_act(
+ act, raise_not_found=True, default=None, case_insensitive=True
+ )
+ if act
+ else intro or Conversation.intro
+ )
+ self.conversation = Conversation(
+ is_conversation, self.max_tokens_to_sample, filepath, update_file
+ )
+ self.conversation.history_offset = history_offset
+ self.session.proxies = proxies
+
+ def ask(
+ self,
+ prompt: str,
+ stream: bool = False,
+ raw: bool = False,
+ optimizer: str = None,
+ conversationally: bool = False,
+ ) -> dict:
+ """Chat with AI
+
+ Args:
+ prompt (str): Prompt to be send.
+ stream (bool, optional): Flag for streaming response. Defaults to False.
+ raw (bool, optional): Stream back raw response as received. Defaults to False.
+ optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+ conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+ Returns:
+ dict : {}
+ ```json
+ {
+ "id": "c0c8d139-d2b9-9909-8aa1-14948bc28404",
+ "object": "chat.completion",
+ "created": 1710852779,
+ "model": "mixtral-8x7b-32768",
+ "choices": [
+ {
+ "index": 0,
+ "message": {
+ "role": "assistant",
+ "content": "Hello! How can I assist you today? I'm here to help answer your questions and engage in conversation on a wide variety of topics. Feel free to ask me anything!"
+ },
+ "logprobs": null,
+ "finish_reason": "stop"
+ }
+ ],
+ "usage": {
+ "prompt_tokens": 47,
+ "prompt_time": 0.03,
+ "completion_tokens": 37,
+ "completion_time": 0.069,
+ "total_tokens": 84,
+ "total_time": 0.099
+ },
+ "system_fingerprint": null
+ }
+ ```
+ """
+ conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+ if optimizer:
+ if optimizer in self.__available_optimizers:
+ conversation_prompt = getattr(Optimizers, optimizer)(
+ conversation_prompt if conversationally else prompt
+ )
+ else:
+ raise Exception(
+ f"Optimizer is not one of {self.__available_optimizers}"
+ )
+ self.session.headers.update(self.headers)
+ payload = {
+ "frequency_penalty": self.frequency_penalty,
+ "messages": [{"content": conversation_prompt, "role": "user"}],
+ "model": self.model,
+ "presence_penalty": self.presence_penalty,
+ "stream": stream,
+ "temperature": self.temperature,
+ "top_p": self.top_p,
+ }
+
+ def for_stream():
+ response = self.session.post(
+ self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
+ )
+ if not response.ok:
+ raise Exception(
+ f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+ )
+
+ message_load = ""
+ for value in response.iter_lines(
+ decode_unicode=True,
+ delimiter="" if raw else "data:",
+ chunk_size=self.stream_chunk_size,
+ ):
+ try:
+ resp = json.loads(value)
+ incomplete_message = self.get_message(resp)
+ if incomplete_message:
+ message_load += incomplete_message
+ resp["choices"][0]["delta"]["content"] = message_load
+ self.last_response.update(resp)
+ yield value if raw else resp
+ elif raw:
+ yield value
+ except json.decoder.JSONDecodeError:
+ pass
+ self.conversation.update_chat_history(
+ prompt, self.get_message(self.last_response)
+ )
+
+ def for_non_stream():
+ response = self.session.post(
+ self.chat_endpoint, json=payload, stream=False, timeout=self.timeout
+ )
+ if not response.ok:
+ raise Exception(
+ f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+ )
+ resp = response.json()
+ self.last_response.update(resp)
+ self.conversation.update_chat_history(
+ prompt, self.get_message(self.last_response)
+ )
+ return resp
+
+ return for_stream() if stream else for_non_stream()
+
+ def chat(
+ self,
+ prompt: str,
+ stream: bool = False,
+ optimizer: str = None,
+ conversationally: bool = False,
+ ) -> str:
+ """Generate response `str`
+ Args:
+ prompt (str): Prompt to be send.
+ stream (bool, optional): Flag for streaming response. Defaults to False.
+ optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+ conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+ Returns:
+ str: Response generated
+ """
+
+ def for_stream():
+ for response in self.ask(
+ prompt, True, optimizer=optimizer, conversationally=conversationally
+ ):
+ yield self.get_message(response)
+
+ def for_non_stream():
+ return self.get_message(
+ self.ask(
+ prompt,
+ False,
+ optimizer=optimizer,
+ conversationally=conversationally,
+ )
+ )
+
+ return for_stream() if stream else for_non_stream()
+
+ def get_message(self, response: dict) -> str:
+ """Retrieves message only from response
+
+ Args:
+ response (dict): Response generated by `self.ask`
+
+ Returns:
+ str: Message extracted
+ """
+ assert isinstance(response, dict), "Response should be of dict data-type only"
+ try:
+ if response["choices"][0].get("delta"):
+ return response["choices"][0]["delta"]["content"]
+ return response["choices"][0]["message"]["content"]
+ except KeyError:
+ return ""
+ #----------------------------------------------------------Sean-------------------------------------class Sean:
  def __init__(
  self,
  is_conversation: bool = True,
@@ -254,7 +885,7 @@ class Sean:
  """
  assert isinstance(response, dict), "Response should be of dict data-type only"
  return response["content"]
- #----------------------------------------------------------OpenAI-----------------------------------------------------------
+ #----------------------------------------------------------OpenAI-----------------------------------
  class OPENAI(Provider):
  model = "gpt-3.5-turbo"
  def __init__(
@@ -294,6 +925,7 @@ class OPENAI(Provider):
  history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
  act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
  """
+ self.session = requests.Session()
  self.is_conversation = is_conversation
  self.max_tokens_to_sample = max_tokens
  self.api_key = api_key
@@ -498,9 +1130,9 @@ class OPENAI(Provider):
  #--------------------------------------LEO-----------------------------------------
  class LEO(Provider):

- model = "llama-2-13b-chat"
+ # model = "llama-2-13b-chat"

- key = "qztbjzBqJueQZLFkwTTJrieu8Vw3789u"
+ # key = "qztbjzBqJueQZLFkwTTJrieu8Vw3789u"
  def __init__(
  self,
  is_conversation: bool = True,
@@ -508,8 +1140,8 @@ class LEO(Provider):
  temperature: float = 0.2,
  top_k: int = -1,
  top_p: float = 0.999,
- model: str = model,
- brave_key: str = key,
+ model: str = "llama-2-13b-chat",
+ brave_key: str = "qztbjzBqJueQZLFkwTTJrieu8Vw3789u",
  timeout: int = 30,
  intro: str = None,
  filepath: str = None,
@@ -536,6 +1168,7 @@ class LEO(Provider):
  history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
  act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
  """
+ self.session = requests.Session()
  self.is_conversation = is_conversation
  self.max_tokens_to_sample = max_tokens
  self.model = model
@@ -559,7 +1192,7 @@ class LEO(Provider):
  for method in dir(Optimizers)
  if callable(getattr(Optimizers, method)) and not method.startswith("__")
  )
- self.ession.headers.update(self.headers)
+ self.session.headers.update(self.headers)
  Conversation.intro = (
  AwesomePrompts().get_act(
  act, raise_not_found=True, default=None, case_insensitive=True
@@ -1932,9 +2565,7 @@ from os import path
  from json import load
  from json import dumps
  import warnings
-
  logging.getLogger("httpx").setLevel(logging.ERROR)
-
  warnings.simplefilter("ignore", category=UserWarning)
  class GEMINI(Provider):
  def __init__(

{webscout-1.3.4 → webscout-1.3.6}/webscout/AIutel.py
@@ -28,6 +28,9 @@ webai = [
  "g4fauto",
  "perplexity",
  "sean",
+ "groq",
+ "reka",
+ "cohere"
  ]

  gpt4free_providers = [

{webscout-1.3.4 → webscout-1.3.6}/webscout/__init__.py
@@ -25,6 +25,9 @@ webai = [
  "g4fauto",
  "perplexity",
  "sean",
+ "groq",
+ "reka",
+ "cohere"
  ]

  gpt4free_providers = [

webscout-1.3.6/webscout/version.py
@@ -0,0 +1,2 @@
+ __version__ = "1.3.6"
+

{webscout-1.3.4 → webscout-1.3.6}/webscout/webai.py
@@ -416,14 +416,14 @@ class Main(cmd.Cmd):
  elif provider == "leo":
  from webscout.AI import LEO

- self.bot = LEO.LEO(
+ self.bot = LEO(
  is_conversation=disable_conversation,
  max_tokens=max_tokens,
  temperature=temperature,
  top_k=top_k,
  top_p=top_p,
- model=getOr(model, LEO.model),
- brave_key=getOr(auth, LEO.key),
+ model=getOr(model, "llama-2-13b-chat"),
+ brave_key=getOr(auth, "qztbjzBqJueQZLFkwTTJrieu8Vw3789u"),
  timeout=timeout,
  intro=intro,
  filepath=filepath,
@@ -471,6 +471,30 @@
  history_offset=history_offset,
  act=awesome_prompt,
  )
+ elif provider == "groq":
+ assert auth, (
+ "GROQ's API-key is required. " "Use the flag `--key` or `-k`"
+ )
+ from webscout.AI import GROQ
+
+
+ self.bot = GROQ(
+ api_key=auth,
+ is_conversation=disable_conversation,
+ max_tokens=max_tokens,
+ temperature=temperature,
+ presence_penalty=top_p,
+ frequency_penalty=top_k,
+ top_p=top_p,
+ model=getOr(model, "mixtral-8x7b-32768"),
+ timeout=timeout,
+ intro=intro,
+ filepath=filepath,
+ update_file=update_file,
+ proxies=proxies,
+ history_offset=history_offset,
+ act=awesome_prompt,
+ )
  elif provider == "sean":
  from webscout.AI import Sean

@@ -485,6 +509,44 @@
  history_offset=history_offset,
  act=awesome_prompt,
  )
+ elif provider == "cohere":
+ assert auth, (
+ "Cohere's API-key is required. Use the flag `--key` or `-k`"
+ )
+ from webscout.AI import Cohere
+ self.bot = Cohere(
+ api_key=auth,
+ is_conversation=disable_conversation,
+ max_tokens=max_tokens,
+ temperature=temperature,
+ top_k=top_k,
+ top_p=top_p,
+ model=getOr(model, "command-r-plus"),
+ timeout=timeout,
+ intro=intro,
+ filepath=filepath,
+ update_file=update_file,
+ proxies=proxies,
+ history_offset=history_offset,
+ act=awesome_prompt,
+ )
+ elif provider == "reka":
+ from webscout.AI import REKA
+
+ self.bot = REKA(
+ api_key=auth,
+ is_conversation=disable_conversation,
+ max_tokens=max_tokens,
+ timeout=timeout,
+ intro=intro,
+ filepath=filepath,
+ update_file=update_file,
+ proxies=proxies,
+ history_offset=history_offset,
+ act=awesome_prompt,
+ model=getOr(model, "reka-core"),
+ # quiet=quiet,
+ )

  elif provider == "koboldai":
  from webscout.AI import KOBOLDAI
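
For reference, the webai.py hunks above register `groq`, `cohere`, and `reka` as CLI providers and pass the CLI auth value through as the API key. A hedged sketch of how they would be invoked, based on the `--provider` usage already shown in the README and the `--key`/`-k` flag referenced by the added asserts (the key values are placeholders, not real credentials):

```shell
python -m webscout.webai webai --provider "groq" --key "YOUR_GROQ_API_KEY"
python -m webscout.webai webai --provider "cohere" --key "YOUR_COHERE_API_KEY"
python -m webscout.webai webai --provider "reka" --key "YOUR_REKA_API_KEY"
```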

{webscout-1.3.4 → webscout-1.3.6}/webscout.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: webscout
- Version: 1.3.4
+ Version: 1.3.6
  Summary: Search for anything using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models, can transcribe yt videos, have TTS support and now has webai(terminal gpt and open interpeter) support
  Author: OEvortex
  Author-email: helpingai5@gmail.com
@@ -97,11 +97,12 @@ Search for anything using the Google, DuckDuckGo.com, yep.com, phind.com, you.co
  - [8. `OpenGPT` - chat With OPENGPT](#8-opengpt---chat-with-opengpt)
  - [9. `KOBOLDIA` -](#9-koboldia--)
  - [10. `Sean` - chat With Sean](#10-sean---chat-with-sean)
+ - [11. `Reka` - chat with reka](#11-reka---chat-with-reka)
+ - [12. `Cohere` - chat with cohere](#12-cohere---chat-with-cohere)
  - [usage of special .LLM file from webscout (webscout.LLM)](#usage-of-special-llm-file-from-webscout-webscoutllm)
  - [`LLM`](#llm)
  - [`LLM` with internet](#llm-with-internet)
  - [`Webai` - terminal gpt and a open interpeter](#webai---terminal-gpt-and-a-open-interpeter)
- - [for using as terminal gpt](#for-using-as-terminal-gpt)

  ## Install
  ```python
@@ -698,6 +699,27 @@ response_str = a.chat(prompt)
  print(response_str)
  ```

+ ### 11. `Reka` - chat with reka
+ ```python
+ from webscout.AI import REKA
+
+ a = REKA(is_conversation=True, max_tokens=8000, timeout=30,api_key="")
+
+ prompt = "tell me about india"
+ response_str = a.chat(prompt)
+ print(response_str)
+ ```
+
+ ### 12. `Cohere` - chat with cohere
+ ```python
+ from webscout.AI import Cohere
+
+ a = Cohere(is_conversation=True, max_tokens=8000, timeout=30,api_key="")
+
+ prompt = "tell me about india"
+ response_str = a.chat(prompt)
+ print(response_str)
+ ```
  ## usage of special .LLM file from webscout (webscout.LLM)

  ### `LLM`
@@ -816,12 +838,12 @@ def use_rawdog_with_webai(prompt):
  try:
  webai_bot = Main(
  max_tokens=500,
- provider="phind",
+ provider="cohere",
  temperature=0.7,
  top_k=40,
  top_p=0.95,
- model="Phind Model", # Replace with your desired model
- auth=None, # Replace with your auth key/value (if needed)
+ model="command-r-plus", # Replace with your desired model
+ auth="0zoQbKs1AAgd8WrPBO9CTIGgVvm5ZMbDcCqJOVyl", # Replace with your auth key/value (if needed)
  timeout=30,
  disable_conversation=True,
  filepath=None,
@@ -841,11 +863,8 @@ def use_rawdog_with_webai(prompt):
  if __name__ == "__main__":
  user_prompt = input("Enter your prompt: ")
  use_rawdog_with_webai(user_prompt)
+
  ```
  ```shell
  python -m webscout.webai webai --provider "phind" --rawdog
  ```
- ### for using as terminal gpt
- ```python
- python -m webscout.webai webai --provider "sean"
- ```

webscout-1.3.4/webscout/version.py
@@ -1,2 +0,0 @@
- __version__ = "1.3.4"
-