webscout 3.1.tar.gz → 3.2.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (74)
  1. webscout-3.2/LICENSE.md +50 -0
  2. {webscout-3.1 → webscout-3.2}/PKG-INFO +36 -2
  3. {webscout-3.1 → webscout-3.2}/README.md +34 -0
  4. {webscout-3.1 → webscout-3.2}/setup.py +2 -2
  5. {webscout-3.1 → webscout-3.2}/webscout/AIutel.py +2 -0
  6. webscout-3.2/webscout/Provider/Deepseek.py +266 -0
  7. {webscout-3.1 → webscout-3.2}/webscout/Provider/__init__.py +2 -0
  8. {webscout-3.1 → webscout-3.2}/webscout/__init__.py +1 -0
  9. {webscout-3.1 → webscout-3.2}/webscout/webai.py +16 -0
  10. {webscout-3.1 → webscout-3.2}/webscout.egg-info/PKG-INFO +36 -2
  11. {webscout-3.1 → webscout-3.2}/webscout.egg-info/SOURCES.txt +1 -0
  12. webscout-3.1/LICENSE.md +0 -50
  13. {webscout-3.1 → webscout-3.2}/DeepWEBS/__init__.py +0 -0
  14. {webscout-3.1 → webscout-3.2}/DeepWEBS/documents/__init__.py +0 -0
  15. {webscout-3.1 → webscout-3.2}/DeepWEBS/documents/query_results_extractor.py +0 -0
  16. {webscout-3.1 → webscout-3.2}/DeepWEBS/documents/webpage_content_extractor.py +0 -0
  17. {webscout-3.1 → webscout-3.2}/DeepWEBS/networks/__init__.py +0 -0
  18. {webscout-3.1 → webscout-3.2}/DeepWEBS/networks/filepath_converter.py +0 -0
  19. {webscout-3.1 → webscout-3.2}/DeepWEBS/networks/google_searcher.py +0 -0
  20. {webscout-3.1 → webscout-3.2}/DeepWEBS/networks/network_configs.py +0 -0
  21. {webscout-3.1 → webscout-3.2}/DeepWEBS/networks/webpage_fetcher.py +0 -0
  22. {webscout-3.1 → webscout-3.2}/DeepWEBS/utilsdw/__init__.py +0 -0
  23. {webscout-3.1 → webscout-3.2}/DeepWEBS/utilsdw/enver.py +0 -0
  24. {webscout-3.1 → webscout-3.2}/DeepWEBS/utilsdw/logger.py +0 -0
  25. {webscout-3.1 → webscout-3.2}/setup.cfg +0 -0
  26. {webscout-3.1 → webscout-3.2}/webscout/AIauto.py +0 -0
  27. {webscout-3.1 → webscout-3.2}/webscout/AIbase.py +0 -0
  28. {webscout-3.1 → webscout-3.2}/webscout/DWEBS.py +0 -0
  29. {webscout-3.1 → webscout-3.2}/webscout/LLM.py +0 -0
  30. {webscout-3.1 → webscout-3.2}/webscout/Local/__init__.py +0 -0
  31. {webscout-3.1 → webscout-3.2}/webscout/Local/_version.py +0 -0
  32. {webscout-3.1 → webscout-3.2}/webscout/Local/formats.py +0 -0
  33. {webscout-3.1 → webscout-3.2}/webscout/Local/model.py +0 -0
  34. {webscout-3.1 → webscout-3.2}/webscout/Local/rawdog.py +0 -0
  35. {webscout-3.1 → webscout-3.2}/webscout/Local/samplers.py +0 -0
  36. {webscout-3.1 → webscout-3.2}/webscout/Local/thread.py +0 -0
  37. {webscout-3.1 → webscout-3.2}/webscout/Local/utils.py +0 -0
  38. {webscout-3.1 → webscout-3.2}/webscout/Provider/BasedGPT.py +0 -0
  39. {webscout-3.1 → webscout-3.2}/webscout/Provider/Berlin4h.py +0 -0
  40. {webscout-3.1 → webscout-3.2}/webscout/Provider/Blackboxai.py +0 -0
  41. {webscout-3.1 → webscout-3.2}/webscout/Provider/ChatGPTUK.py +0 -0
  42. {webscout-3.1 → webscout-3.2}/webscout/Provider/Cohere.py +0 -0
  43. {webscout-3.1 → webscout-3.2}/webscout/Provider/Gemini.py +0 -0
  44. {webscout-3.1 → webscout-3.2}/webscout/Provider/Groq.py +0 -0
  45. {webscout-3.1 → webscout-3.2}/webscout/Provider/Koboldai.py +0 -0
  46. {webscout-3.1 → webscout-3.2}/webscout/Provider/Leo.py +0 -0
  47. {webscout-3.1 → webscout-3.2}/webscout/Provider/Llama2.py +0 -0
  48. {webscout-3.1 → webscout-3.2}/webscout/Provider/OpenGPT.py +0 -0
  49. {webscout-3.1 → webscout-3.2}/webscout/Provider/Openai.py +0 -0
  50. {webscout-3.1 → webscout-3.2}/webscout/Provider/Perplexity.py +0 -0
  51. {webscout-3.1 → webscout-3.2}/webscout/Provider/Phind.py +0 -0
  52. {webscout-3.1 → webscout-3.2}/webscout/Provider/Poe.py +0 -0
  53. {webscout-3.1 → webscout-3.2}/webscout/Provider/Reka.py +0 -0
  54. {webscout-3.1 → webscout-3.2}/webscout/Provider/ThinkAnyAI.py +0 -0
  55. {webscout-3.1 → webscout-3.2}/webscout/Provider/Xjai.py +0 -0
  56. {webscout-3.1 → webscout-3.2}/webscout/Provider/Yepchat.py +0 -0
  57. {webscout-3.1 → webscout-3.2}/webscout/Provider/Youchat.py +0 -0
  58. {webscout-3.1 → webscout-3.2}/webscout/__main__.py +0 -0
  59. {webscout-3.1 → webscout-3.2}/webscout/async_providers.py +0 -0
  60. {webscout-3.1 → webscout-3.2}/webscout/cli.py +0 -0
  61. {webscout-3.1 → webscout-3.2}/webscout/exceptions.py +0 -0
  62. {webscout-3.1 → webscout-3.2}/webscout/g4f.py +0 -0
  63. {webscout-3.1 → webscout-3.2}/webscout/models.py +0 -0
  64. {webscout-3.1 → webscout-3.2}/webscout/tempid.py +0 -0
  65. {webscout-3.1 → webscout-3.2}/webscout/transcriber.py +0 -0
  66. {webscout-3.1 → webscout-3.2}/webscout/utils.py +0 -0
  67. {webscout-3.1 → webscout-3.2}/webscout/version.py +0 -0
  68. {webscout-3.1 → webscout-3.2}/webscout/voice.py +0 -0
  69. {webscout-3.1 → webscout-3.2}/webscout/webscout_search.py +0 -0
  70. {webscout-3.1 → webscout-3.2}/webscout/webscout_search_async.py +0 -0
  71. {webscout-3.1 → webscout-3.2}/webscout.egg-info/dependency_links.txt +0 -0
  72. {webscout-3.1 → webscout-3.2}/webscout.egg-info/entry_points.txt +0 -0
  73. {webscout-3.1 → webscout-3.2}/webscout.egg-info/requires.txt +0 -0
  74. {webscout-3.1 → webscout-3.2}/webscout.egg-info/top_level.txt +0 -0
webscout-3.2/LICENSE.md ADDED
@@ -0,0 +1,50 @@
+ ************************************************
+ **** HelpingAI License ****
+ ************************************************
+
+ Version 2.0
+
+ Developed by Abhay Koul
+
+ ### Preamble
+
+ The HelpingAI License governs the use of HelpingAI's digital assets, including but not limited to software, scripts, datasets, documents, images, audio recordings, videos. The HelpingAI License aims to provide clear, comprehensive terms for accessing, modifying, and sharing resources, while promoting ethical development practices.
+
+ ### Grant of Rights
+
+ Under the HelpingAI License, HelpingAI grants you the rights to copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Content, provided you comply with the terms and conditions outlined in this document.
+
+ ### Terms and Conditions
+
+ To exercise the rights granted in the previous section, you must adhere to the following terms and conditions:
+
+ 2.1. **Redistribution of Source Code.**
+ If you redistribute the Source Code, you must include the complete HelpingAI License with your distribution. You must also add clear notifications in all modified files stating:
+
+ > "This Work is released under the HelpingAI License v2.0."
+
+ 2.2. **Distribution in Binary Form.**
+ If you distribute Binaries derived from the Source Code, you must include the following statement in your distribution:
+
+ > "This Work is based on the HelpingAI Licensed Work, under the HelpingAI License v2.0."
+
+ 2.3. **Notification of Changes.**
+ You must clearly indicate any modifications you make to the Source Code or Documentation, including detailed comments about the nature and extent of the changes. Include the date and originator of the modifications.
+
+ 2.4. **Branding Attribution.**
+ You must not remove or alter any HelpingAI branding, logos, or notices included in the Content without explicit prior consent from HelpingAI.
+
+ 2.5. **Disclaimer of Warranty.**
+ The Content is provided "AS IS," without any implied warranties, including but not limited to warranties of merchantability, fitness for a particular purpose, and non-infringement.
+
+ 2.6. **Limitation of Liability.**
+ To the maximum extent permitted by law, neither HelpingAI nor any contributor shall be liable for any loss, personal injury, property damage, or any indirect, special, incidental, or consequential damages arising from or related to the use of the Content.
+
+ 2.7. **Governing Law.**
+ This HelpingAI License shall be governed and construed in accordance with the laws of the jurisdiction where HelpingAI primarily operates.
+
+ ### Definitions
+
+ 3.1. **"Source Code"** refers to the preferred form for making modifications to the Content, typically represented by human-readable programming languages, scripts, or documentation formats.
+
+ 3.2. **"Binaries"** refers to compiled forms of the Source Code, such as executables, libraries, or similar artifacts produced from the Source Code.
{webscout-3.1 → webscout-3.2}/PKG-INFO
@@ -1,10 +1,10 @@
  Metadata-Version: 2.1
  Name: webscout
- Version: 3.1
+ Version: 3.2
  Summary: Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs
  Author: OEvortex
  Author-email: helpingai5@gmail.com
- License: HelpingAI Simplified Universal License
+ License: HelpingAI
  Project-URL: Documentation, https://github.com/OE-LUCIFER/Webscout/wiki
  Project-URL: Source, https://github.com/OE-LUCIFER/Webscout
  Project-URL: Tracker, https://github.com/OE-LUCIFER/Webscout/issues
@@ -140,6 +140,7 @@ Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can
  - [14. `chatgptuk` - Chat with gemini-pro](#14-chatgptuk---chat-with-gemini-pro)
  - [15. `poe`- chat with poe](#15-poe--chat-with-poe)
  - [16. `BasedGPT` - chat with GPT](#16-basedgpt---chat-with-gpt)
+ - [17. `DeepSeek` -chat with deepseek](#17-deepseek--chat-with-deepseek)
  - [`LLM`](#llm)
  - [`Local-LLM` webscout can now run GGUF models](#local-llm-webscout-can-now-run-gguf-models)
  - [`Function-calling-local-llm`](#function-calling-local-llm)
@@ -1219,6 +1220,39 @@ Usage code similar to other proviers

  ### 16. `BasedGPT` - chat with GPT
  Usage code similar to other providers
+
+ ### 17. `DeepSeek` -chat with deepseek
+ ```python
+ from webscout import DeepSeek
+ from rich import print
+
+ ai = DeepSeek(
+     is_conversation=True,
+     api_key='',  # Watch this video https://youtu.be/Euin6p5Ryks?si=-84JBtyqGwMzvdIq to know from where u can get this key for free
+     max_tokens=800,
+     timeout=30,
+     intro=None,
+     filepath=None,
+     update_file=True,
+     proxies={},
+     history_offset=10250,
+     act=None,
+     model="deepseek_chat"
+ )
+
+ # Start an infinite loop for continuous interaction
+ while True:
+     # Define a prompt to send to the AI
+     prompt = input("Enter your prompt: ")
+
+     # Check if the user wants to exit the loop
+     if prompt.lower() == "exit":
+         break
+
+     # Use the 'chat' method to send the prompt and receive a response
+     r = ai.chat(prompt)
+     print(r)
+ ```
  ### `LLM`
  ```python
  from webscout.LLM import LLM
{webscout-3.1 → webscout-3.2}/README.md
@@ -75,6 +75,7 @@ Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can
  - [14. `chatgptuk` - Chat with gemini-pro](#14-chatgptuk---chat-with-gemini-pro)
  - [15. `poe`- chat with poe](#15-poe--chat-with-poe)
  - [16. `BasedGPT` - chat with GPT](#16-basedgpt---chat-with-gpt)
+ - [17. `DeepSeek` -chat with deepseek](#17-deepseek--chat-with-deepseek)
  - [`LLM`](#llm)
  - [`Local-LLM` webscout can now run GGUF models](#local-llm-webscout-can-now-run-gguf-models)
  - [`Function-calling-local-llm`](#function-calling-local-llm)
@@ -1154,6 +1155,39 @@ Usage code similar to other proviers

  ### 16. `BasedGPT` - chat with GPT
  Usage code similar to other providers
+
+ ### 17. `DeepSeek` -chat with deepseek
+ ```python
+ from webscout import DeepSeek
+ from rich import print
+
+ ai = DeepSeek(
+     is_conversation=True,
+     api_key='',  # Watch this video https://youtu.be/Euin6p5Ryks?si=-84JBtyqGwMzvdIq to know from where u can get this key for free
+     max_tokens=800,
+     timeout=30,
+     intro=None,
+     filepath=None,
+     update_file=True,
+     proxies={},
+     history_offset=10250,
+     act=None,
+     model="deepseek_chat"
+ )
+
+ # Start an infinite loop for continuous interaction
+ while True:
+     # Define a prompt to send to the AI
+     prompt = input("Enter your prompt: ")
+
+     # Check if the user wants to exit the loop
+     if prompt.lower() == "exit":
+         break
+
+     # Use the 'chat' method to send the prompt and receive a response
+     r = ai.chat(prompt)
+     print(r)
+ ```
  ### `LLM`
  ```python
  from webscout.LLM import LLM
{webscout-3.1 → webscout-3.2}/setup.py
@@ -5,7 +5,7 @@ with open("README.md", encoding="utf-8") as f:

  setup(
      name="webscout",
-     version="3.1",
+     version="3.2",
      description="Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs",
      long_description=README,
      long_description_content_type="text/markdown",
@@ -77,7 +77,7 @@ setup(
          'huggingface_hub',
      ],
  },
-     license="HelpingAI Simplified Universal License",
+     license="HelpingAI",
      project_urls={
          "Documentation": "https://github.com/OE-LUCIFER/Webscout/wiki",
          "Source": "https://github.com/OE-LUCIFER/Webscout",
{webscout-3.1 → webscout-3.2}/webscout/AIutel.py
@@ -47,6 +47,7 @@ webai = [
      "auto",
      "poe",
      "basedgpt",
+     "deepseek",
  ]
  gpt4free_providers = [
      provider.__name__ for provider in g4f.Provider.__providers__  # if provider.working
@@ -540,6 +541,7 @@ print("The essay is about...")
  """
  if not quiet:
      print(
+         "Rawdog is an experimental tool that generates and auto-executes Python scripts in the cli.\n"
          "To get the most out of Rawdog. Ensure the following are installed:\n"
          " 1. Python 3.x\n"
          " 2. Dependency:\n"
webscout-3.2/webscout/Provider/Deepseek.py ADDED
@@ -0,0 +1,266 @@
+ import time
+ import uuid
+ from selenium import webdriver
+ from selenium.webdriver.chrome.options import Options
+ from selenium.webdriver.common.by import By
+ from selenium.webdriver.support import expected_conditions as EC
+ from selenium.webdriver.support.ui import WebDriverWait
+ import click
+ import requests
+ from requests import get
+ from uuid import uuid4
+ from re import findall
+ from requests.exceptions import RequestException
+ from curl_cffi.requests import get, RequestsError
+ import g4f
+ from random import randint
+ from PIL import Image
+ import io
+ import re
+ import json
+ import yaml
+ from ..AIutel import Optimizers
+ from ..AIutel import Conversation
+ from ..AIutel import AwesomePrompts, sanitize_stream
+ from ..AIbase import Provider, AsyncProvider
+ from Helpingai_T2 import Perplexity
+ from webscout import exceptions
+ from typing import Any, AsyncGenerator, Dict, Optional
+ import logging
+ import httpx
+ import os
+ from dotenv import load_dotenv; load_dotenv()
+
+ #-----------------------------------------------DeepSeek--------------------------------------------
+ class DeepSeek(Provider):
+     def __init__(
+         self,
+         api_key: str,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = 'deepseek_chat',
+         temperature: float = 1.0,
+     ):
+         """Initializes DeepSeek
+
+         Args:
+             api_key (str): DeepSeek API key.
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+             timeout (int, optional): Http request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): Http request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+             model_type (str, optional): DeepSeek model type. Defaults to 'deepseek_chat'.
+             temperature (float, optional): Creativity level of the response. Defaults to 1.0.
+         """
+         self.api_token = api_key
+         self.auth_headers = {
+             'Authorization': f'Bearer {self.api_token}'
+         }
+         self.api_base_url = 'https://chat.deepseek.com/api/v0/chat'
+         self.api_session = requests.Session()
+         self.api_session.headers.update(self.auth_headers)
+
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.model_type = model
+         self.temperature = temperature
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         # self.session.proxies = proxies
+
+     def clear_chat(self) -> None:
+         """
+         Clears the chat context by making a POST request to the clear_context endpoint.
+         """
+         clear_payload = {"model_class": "deepseek_chat", "append_welcome_message": False}
+         clear_response = self.api_session.post(f'{self.api_base_url}/clear_context', json=clear_payload)
+         clear_response.raise_for_status()  # Raises an HTTPError if the HTTP request returned an unsuccessful status code
+
+     def generate(self, user_message: str, response_temperature: float = 1.0, model_type: Optional[str] = "deepseek_chat", verbose: bool = False) -> str:
+         """
+         Generates a response from the DeepSeek API based on the provided message.
+
+         Args:
+             user_message (str): The message to send to the chat API.
+             response_temperature (float, optional): The creativity level of the response. Defaults to 1.0.
+             model_type (str, optional): The model class to be used for the chat session.
+             verbose (bool, optional): Whether to print the response content. Defaults to False.
+
+         Returns:
+             str: The concatenated response content received from the API.
+
+         Available models:
+             - deepseek_chat
+             - deepseek_code
+         """
+         request_payload = {
+             "message": user_message,
+             "stream": True,
+             "model_preference": None,
+             "model_class": model_type,
+             "temperature": response_temperature
+         }
+         api_response = self.api_session.post(f'{self.api_base_url}/completions', json=request_payload, stream=True)
+         api_response.raise_for_status()
+
+         combined_response = ""
+         for response_line in api_response.iter_lines(decode_unicode=True, chunk_size=1):
+             if response_line:
+                 cleaned_line = re.sub("data:", "", response_line)
+                 response_json = json.loads(cleaned_line)
+                 response_content = response_json['choices'][0]['delta']['content']
+                 if response_content and not re.match(r'^\s{5,}$', response_content):
+                     if verbose: print(response_content, end="", flush=True)
+                     combined_response += response_content
+
+         return combined_response
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> dict:
+         """Chat with AI
+
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             dict : {}
+         ```json
+         {
+             "id": "chatcmpl-TaREJpBZsRVQFRFic1wIA7Q7XfnaD",
+             "object": "chat.completion",
+             "created": 1704623244,
+             "model": "gpt-3.5-turbo",
+             "usage": {
+                 "prompt_tokens": 0,
+                 "completion_tokens": 0,
+                 "total_tokens": 0
+             },
+             "choices": [
+                 {
+                     "message": {
+                         "role": "assistant",
+                         "content": "Hello! How can I assist you today?"
+                     },
+                     "finish_reason": "stop",
+                     "index": 0
+                 }
+             ]
+         }
+         ```
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         def for_stream():
+             response = self.generate(
+                 user_message=conversation_prompt,
+                 response_temperature=self.temperature,
+                 model_type=self.model_type,
+                 verbose=False,
+             )
+             # print(response)
+             self.last_response.update(dict(text=response))
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+             yield dict(text=response) if raw else dict(text=response)
+
+         def for_non_stream():
+             # let's make use of stream
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
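For readers skimming the new provider: `generate` consumes the streaming `completions` endpoint line by line, stripping the `data:` prefix from each chunk and skipping whitespace-only deltas. Below is a minimal, self-contained sketch of that parsing pattern using a canned payload instead of a live request; the payload shape is copied from the code above, and no real endpoint or API key is involved.

```python
import json
import re

# Canned SSE-style lines shaped like the payloads generate() expects;
# a live request would yield these via api_response.iter_lines().
canned_lines = [
    'data: {"choices": [{"delta": {"content": "Hello"}}]}',
    'data: {"choices": [{"delta": {"content": ", world"}}]}',
]

combined_response = ""
for line in canned_lines:
    if line:
        cleaned = re.sub("data:", "", line)  # strip the SSE prefix
        payload = json.loads(cleaned)
        chunk = payload["choices"][0]["delta"]["content"]
        if chunk and not re.match(r"^\s{5,}$", chunk):  # drop long whitespace runs
            combined_response += chunk

print(combined_response)  # -> Hello, world
```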
{webscout-3.1 → webscout-3.2}/webscout/Provider/__init__.py
@@ -29,6 +29,7 @@ from .Berlin4h import Berlin4h
  from .ChatGPTUK import ChatGPTUK
  from .Poe import POE
  from .BasedGPT import BasedGPT
+ from .Deepseek import DeepSeek
  __all__ = [
      'ThinkAnyAI',
      'Xjai',
@@ -59,4 +60,5 @@ __all__ = [
      'ChatGPTUK',
      'POE',
      'BasedGPT',
+     'DeepSeek',
  ]
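With this hunk, the provider is importable as `webscout.Provider.DeepSeek`, and the README example above imports it from the top-level `webscout` package. A quick, hypothetical smoke test (not part of the release) to confirm both paths resolve to the same class:

```python
# Hypothetical check: both names added in 3.2 should point at the same class.
from webscout import DeepSeek as top_level
from webscout.Provider import DeepSeek as from_provider

assert top_level is from_provider
print(top_level.__module__)  # expected: webscout.Provider.Deepseek
```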
{webscout-3.1 → webscout-3.2}/webscout/__init__.py
@@ -35,6 +35,7 @@ webai = [
      "auto",
      "poe",
      "basedgpt",
+     "deepseek",
  ]

  gpt4free_providers = [
{webscout-3.1 → webscout-3.2}/webscout/webai.py
@@ -625,7 +625,23 @@ class Main(cmd.Cmd):
                  model=getOr(model, "reka-core"),
                  # quiet=quiet,
              )
+         elif provider == "deepseek":
+             from webscout import DeepSeek

+             self.bot = DeepSeek(
+                 api_key=auth,
+                 is_conversation=disable_conversation,
+                 max_tokens=max_tokens,
+                 timeout=timeout,
+                 intro=intro,
+                 filepath=filepath,
+                 update_file=update_file,
+                 proxies=proxies,
+                 history_offset=history_offset,
+                 act=awesome_prompt,
+                 model=getOr(model, "deepseek_chat"),
+                 # quiet=quiet,
+             )
          elif provider == "koboldai":
              from webscout import KOBOLDAI
{webscout-3.1 → webscout-3.2}/webscout.egg-info/PKG-INFO
@@ -1,10 +1,10 @@
  Metadata-Version: 2.1
  Name: webscout
- Version: 3.1
+ Version: 3.2
  Summary: Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs
  Author: OEvortex
  Author-email: helpingai5@gmail.com
- License: HelpingAI Simplified Universal License
+ License: HelpingAI
  Project-URL: Documentation, https://github.com/OE-LUCIFER/Webscout/wiki
  Project-URL: Source, https://github.com/OE-LUCIFER/Webscout
  Project-URL: Tracker, https://github.com/OE-LUCIFER/Webscout/issues
@@ -140,6 +140,7 @@ Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can
  - [14. `chatgptuk` - Chat with gemini-pro](#14-chatgptuk---chat-with-gemini-pro)
  - [15. `poe`- chat with poe](#15-poe--chat-with-poe)
  - [16. `BasedGPT` - chat with GPT](#16-basedgpt---chat-with-gpt)
+ - [17. `DeepSeek` -chat with deepseek](#17-deepseek--chat-with-deepseek)
  - [`LLM`](#llm)
  - [`Local-LLM` webscout can now run GGUF models](#local-llm-webscout-can-now-run-gguf-models)
  - [`Function-calling-local-llm`](#function-calling-local-llm)
@@ -1219,6 +1220,39 @@ Usage code similar to other proviers

  ### 16. `BasedGPT` - chat with GPT
  Usage code similar to other providers
+
+ ### 17. `DeepSeek` -chat with deepseek
+ ```python
+ from webscout import DeepSeek
+ from rich import print
+
+ ai = DeepSeek(
+     is_conversation=True,
+     api_key='',  # Watch this video https://youtu.be/Euin6p5Ryks?si=-84JBtyqGwMzvdIq to know from where u can get this key for free
+     max_tokens=800,
+     timeout=30,
+     intro=None,
+     filepath=None,
+     update_file=True,
+     proxies={},
+     history_offset=10250,
+     act=None,
+     model="deepseek_chat"
+ )
+
+ # Start an infinite loop for continuous interaction
+ while True:
+     # Define a prompt to send to the AI
+     prompt = input("Enter your prompt: ")
+
+     # Check if the user wants to exit the loop
+     if prompt.lower() == "exit":
+         break
+
+     # Use the 'chat' method to send the prompt and receive a response
+     r = ai.chat(prompt)
+     print(r)
+ ```
  ### `LLM`
  ```python
  from webscout.LLM import LLM
{webscout-3.1 → webscout-3.2}/webscout.egg-info/SOURCES.txt
@@ -52,6 +52,7 @@ webscout/Provider/Berlin4h.py
  webscout/Provider/Blackboxai.py
  webscout/Provider/ChatGPTUK.py
  webscout/Provider/Cohere.py
+ webscout/Provider/Deepseek.py
  webscout/Provider/Gemini.py
  webscout/Provider/Groq.py
  webscout/Provider/Koboldai.py
webscout-3.1/LICENSE.md DELETED
@@ -1,50 +0,0 @@
- ****************************************
- **** HelpingAI Simplified Universal License ****
- ****************************************
-
- Version 1.0
-
- ### Introduction
-
- This HelpingAI Simplified Universal License (HSUL) governs HelpingAI's content, including computer programs, scripts, datasets, documents, images, audio recordings, videos, and other digital assets. The HSUL provides simple, universal terms for accessing, modifying, and sharing resources while embracing ethical development practices.
-
- ### Grant of Rights
-
- Under the HSUL, HelpingAI authorizes you to copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Content, subject to the terms and conditions detailed in this document.
-
- ### Conditions
-
- To qualify for the rights granted in section 1, you must strictly adhere to the following conditions:
-
- 2.1. **Redistributions of Source Code.**
- If you redistribute the Source Code, you must include the entire HSUL with your distribution. Furthermore, you must add prominent notifications in all affected files stating:
-
- > "This Work is released under the HelpingAI Simplified Universal License v1.0."
-
- 2.2. **Binary Form Redistributions.**
- If you distribute Binaries generated from the Source Code, you must ensure the inclusion of the following statement in your distribution:
-
- > "This Work is based upon the HelpingAI Simplified Universally Licensed Work, under the HelpingAI Simplified Universal License v1.0."
-
- 2.3. **Notification of Changes.**
- Clearly indicate any alterations you introduce to the Source Code or Documentation via prominent comments detailing the nature and scope of the change(s). Reference the date and originator of the modifications.
-
- 2.4. **Branding Attribution.**
- Do not remove or alter any HelpingAI branding, logos, or notices included in the Content without explicit prior consent from HelpingAI.
-
- 2.5. **Exclusion of Warranty.**
- The Content is delivered "AS IS," bereft of any implicit guarantee, including — though not constrained to — warranties pertaining to marketability, applicability for a particular purpose, and non-infringement.
-
- 2.6. **Limitation of Liability.**
- To the maximum extent allowed by law, neither HelpingAI nor any contributor shall bear responsibility for any loss, personal injury, property damage, indirect, special, incidental, or consequential damages stemming from or relating to the Content or its employment.
-
- 2.7. **Governing Law.**
- This HSUL shall be managed and construed according to the laws of the jurisdiction where HelpingAI primarily operates.
-
- ### Definitions
-
- 3.1. **"Source Code"** signifies the preferred form for editing the Content, typically represented by human-readable programming languages, scripts, or documentation formats.
-
- 3.2. **"Binaries"** denote compiled forms of the Source Code, executables, libraries, or similar artifacts built from the Source Code.
-
- By leveraging this Content, you confirm your approval of the HSUL and pledge to honor its terms and conditions. If you disagree with the HSUL's rules, refrain from engaging with the Content.