webscout 3.1b0__py3-none-any.whl → 3.3__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.

Potentially problematic release.



webscout/AIutel.py CHANGED
@@ -47,6 +47,7 @@ webai = [
      "auto",
      "poe",
      "basedgpt",
+     "deepseek",
  ]
  gpt4free_providers = [
      provider.__name__ for provider in g4f.Provider.__providers__ # if provider.working
@@ -213,12 +214,12 @@ class Conversation:
      ), f"File '{filepath}' does not exist"
      if not os.path.isfile(filepath):
          logging.debug(f"Creating new chat-history file - '{filepath}'")
-         with open(filepath, "w") as fh: # Try creating new file
+         with open(filepath, "w", encoding='utf-8') as fh: # Try creating new file
              # lets add intro here
              fh.write(self.intro)
      else:
          logging.debug(f"Loading conversation from '{filepath}'")
-         with open(filepath) as fh:
+         with open(filepath, encoding='utf-8') as fh:
              file_contents = fh.read()
              # Presume intro prompt is part of the file content
              self.chat_history = file_contents
@@ -269,7 +270,7 @@ class Conversation:
          return
      new_history = self.history_format % dict(user=prompt, llm=response)
      if self.file and self.update_file:
-         with open(self.file, "a") as fh:
+         with open(self.file, "a", encoding='utf-8') as fh:
              fh.write(new_history)
      self.chat_history += new_history
  
@@ -540,6 +541,7 @@ print("The essay is about...")
      """
      if not quiet:
          print(
+             "Rawdog is an experimental tool that generates and auto-executes Python scripts in the cli.\n"
              "To get the most out of Rawdog. Ensure the following are installed:\n"
              " 1. Python 3.x\n"
              " 2. Dependency:\n"
webscout/Local/__init__.py CHANGED
@@ -8,3 +8,4 @@ from . import utils
  
  from .model import Model
  from .thread import Thread
+ from .rawdog import *
webscout/Local/_version.py CHANGED
@@ -1,3 +1,3 @@
  from llama_cpp import __version__ as __llama_cpp_version__
  
- __version__ = '2.9'
+ __version__ = '3.1'
webscout/Local/rawdog.py CHANGED
@@ -21,18 +21,16 @@ import logging
  import appdirs
  import datetime
  import re
- from .model import *
- from .utils import (
-     RESET_ALL,
-     _SupportsWriteAndFlush,
-     cls,
-     print_verbose,
-     truncate,
-     run_system_command
- )
+ from .model import Model, assert_model_is_loaded, _SupportsWriteAndFlush
+ from .utils import RESET_ALL, cls, print_verbose, truncate
  from .samplers import SamplerSettings, DefaultSampling
- from .formats import AdvancedFormat
+ from typing import Optional, Literal, Union
+ from .formats import AdvancedFormat
+
  from .formats import blank as formats_blank
+ from ..AIutel import *
+ from .samplers import SamplerSettings, DefaultSampling
+ from .formats import AdvancedFormat
  from rich.markdown import Markdown
  from rich.console import Console
  appdir = appdirs.AppDirs("AIWEBS", "vortex")
webscout/Provider/Deepseek.py ADDED
@@ -0,0 +1,266 @@
+ import time
+ import uuid
+ from selenium import webdriver
+ from selenium.webdriver.chrome.options import Options
+ from selenium.webdriver.common.by import By
+ from selenium.webdriver.support import expected_conditions as EC
+ from selenium.webdriver.support.ui import WebDriverWait
+ import click
+ import requests
+ from requests import get
+ from uuid import uuid4
+ from re import findall
+ from requests.exceptions import RequestException
+ from curl_cffi.requests import get, RequestsError
+ import g4f
+ from random import randint
+ from PIL import Image
+ import io
+ import re
+ import json
+ import yaml
+ from ..AIutel import Optimizers
+ from ..AIutel import Conversation
+ from ..AIutel import AwesomePrompts, sanitize_stream
+ from ..AIbase import Provider, AsyncProvider
+ from Helpingai_T2 import Perplexity
+ from webscout import exceptions
+ from typing import Any, AsyncGenerator, Dict, Optional
+ import logging
+ import httpx
+ import os
+ from dotenv import load_dotenv; load_dotenv()
+
+ #-----------------------------------------------DeepSeek--------------------------------------------
+ class DeepSeek(Provider):
+     def __init__(
+         self,
+         api_key: str,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = 'deepseek_chat',
+         temperature: float = 1.0,
+     ):
+         """Initializes DeepSeek
+
+         Args:
+             api_key (str): DeepSeek API key.
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+             timeout (int, optional): Http request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): Http request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+             model_type (str, optional): DeepSeek model type. Defaults to 'deepseek_chat'.
+             temperature (float, optional): Creativity level of the response. Defaults to 1.0.
+         """
+         self.api_token = api_key
+         self.auth_headers = {
+             'Authorization': f'Bearer {self.api_token}'
+         }
+         self.api_base_url = 'https://chat.deepseek.com/api/v0/chat'
+         self.api_session = requests.Session()
+         self.api_session.headers.update(self.auth_headers)
+
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.model_type = model
+         self.temperature = temperature
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         # self.session.proxies = proxies
+
+     def clear_chat(self) -> None:
+         """
+         Clears the chat context by making a POST request to the clear_context endpoint.
+         """
+         clear_payload = {"model_class": "deepseek_chat", "append_welcome_message": False}
+         clear_response = self.api_session.post(f'{self.api_base_url}/clear_context', json=clear_payload)
+         clear_response.raise_for_status() # Raises an HTTPError if the HTTP request returned an unsuccessful status code
+
+     def generate(self, user_message: str, response_temperature: float = 1.0, model_type: Optional[str] = "deepseek_chat", verbose: bool = False) -> str:
+         """
+         Generates a response from the DeepSeek API based on the provided message.
+
+         Args:
+             user_message (str): The message to send to the chat API.
+             response_temperature (float, optional): The creativity level of the response. Defaults to 1.0.
+             model_type (str, optional): The model class to be used for the chat session.
+             verbose (bool, optional): Whether to print the response content. Defaults to False.
+
+         Returns:
+             str: The concatenated response content received from the API.
+
+         Available models:
+             - deepseek_chat
+             - deepseek_code
+         """
+         request_payload = {
+             "message": user_message,
+             "stream": True,
+             "model_preference": None,
+             "model_class": model_type,
+             "temperature": response_temperature
+         }
+         api_response = self.api_session.post(f'{self.api_base_url}/completions', json=request_payload, stream=True)
+         api_response.raise_for_status()
+
+         combined_response = ""
+         for response_line in api_response.iter_lines(decode_unicode=True, chunk_size=1):
+             if response_line:
+                 cleaned_line = re.sub("data:", "", response_line)
+                 response_json = json.loads(cleaned_line)
+                 response_content = response_json['choices'][0]['delta']['content']
+                 if response_content and not re.match(r'^\s{5,}$', response_content):
+                     if verbose: print(response_content, end="", flush=True)
+                     combined_response += response_content
+
+         return combined_response
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> dict:
+         """Chat with AI
+
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             dict : {}
+         ```json
+         {
+             "id": "chatcmpl-TaREJpBZsRVQFRFic1wIA7Q7XfnaD",
+             "object": "chat.completion",
+             "created": 1704623244,
+             "model": "gpt-3.5-turbo",
+             "usage": {
+                 "prompt_tokens": 0,
+                 "completion_tokens": 0,
+                 "total_tokens": 0
+             },
+             "choices": [
+                 {
+                     "message": {
+                         "role": "assistant",
+                         "content": "Hello! How can I assist you today?"
+                     },
+                     "finish_reason": "stop",
+                     "index": 0
+                 }
+             ]
+         }
+         ```
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         def for_stream():
+             response = self.generate(
+                 user_message=conversation_prompt,
+                 response_temperature=self.temperature,
+                 model_type=self.model_type,
+                 verbose=False,
+             )
+             # print(response)
+             self.last_response.update(dict(text=response))
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+             yield dict(text=response) if raw else dict(text=response)
+
+         def for_non_stream():
+             # let's make use of stream
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
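
Going by the class definition above, the new provider can be driven like any other webscout `Provider`; a short usage sketch (the token value is a placeholder):

```python
from webscout import DeepSeek

bot = DeepSeek(api_key="YOUR_DEEPSEEK_TOKEN")  # placeholder token

# Non-streaming: chat() returns the full response string.
print(bot.chat("Summarize what webscout does in one line."))

# "Streaming": chat(stream=True) yields str chunks. Note that in this
# implementation generate() accumulates the SSE stream internally, so the
# generator yields the complete text as a single chunk.
for chunk in bot.chat("Write a haiku about the terminal.", stream=True):
    print(chunk, end="", flush=True)
```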
webscout/Provider/__init__.py CHANGED
@@ -29,6 +29,7 @@ from .Berlin4h import Berlin4h
  from .ChatGPTUK import ChatGPTUK
  from .Poe import POE
  from .BasedGPT import BasedGPT
+ from .Deepseek import DeepSeek
  __all__ = [
      'ThinkAnyAI',
      'Xjai',
@@ -59,4 +60,5 @@ __all__ = [
      'ChatGPTUK',
      'POE',
      'BasedGPT',
+     'DeepSeek',
  ]
webscout/__init__.py CHANGED
@@ -35,6 +35,7 @@ webai = [
      "auto",
      "poe",
      "basedgpt",
+     "deepseek",
  ]
  
  gpt4free_providers = [
webscout/version.py CHANGED
@@ -1,2 +1,2 @@
- __version__ = "2.9"
+ __version__ = "3.1"
  
webscout/webai.py CHANGED
@@ -625,7 +625,23 @@ class Main(cmd.Cmd):
              model=getOr(model, "reka-core"),
              # quiet=quiet,
          )
+         elif provider == "deepseek":
+             from webscout import DeepSeek
  
+             self.bot = DeepSeek(
+                 api_key=auth,
+                 is_conversation=disable_conversation,
+                 max_tokens=max_tokens,
+                 timeout=timeout,
+                 intro=intro,
+                 filepath=filepath,
+                 update_file=update_file,
+                 proxies=proxies,
+                 history_offset=history_offset,
+                 act=awesome_prompt,
+                 model=getOr(model, "deepseek_chat"),
+                 # quiet=quiet,
+             )
          elif provider == "koboldai":
              from webscout import KOBOLDAI
  
webscout-3.3.dist-info/LICENSE.md ADDED
@@ -0,0 +1,50 @@
+ ************************************************
+ **** HelpingAI License ****
+ ************************************************
+
+ Version 2.0
+
+ Developed by Abhay Koul
+
+ ### Preamble
+
+ The HelpingAI License governs the use of HelpingAI's digital assets, including but not limited to software, scripts, datasets, documents, images, audio recordings, videos. The HelpingAI License aims to provide clear, comprehensive terms for accessing, modifying, and sharing resources, while promoting ethical development practices.
+
+ ### Grant of Rights
+
+ Under the HelpingAI License, HelpingAI grants you the rights to copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Content, provided you comply with the terms and conditions outlined in this document.
+
+ ### Terms and Conditions
+
+ To exercise the rights granted in the previous section, you must adhere to the following terms and conditions:
+
+ 2.1. **Redistribution of Source Code.**
+ If you redistribute the Source Code, you must include the complete HelpingAI License with your distribution. You must also add clear notifications in all modified files stating:
+
+ > "This Work is released under the HelpingAI License v2.0."
+
+ 2.2. **Distribution in Binary Form.**
+ If you distribute Binaries derived from the Source Code, you must include the following statement in your distribution:
+
+ > "This Work is based on the HelpingAI Licensed Work, under the HelpingAI License v2.0."
+
+ 2.3. **Notification of Changes.**
+ You must clearly indicate any modifications you make to the Source Code or Documentation, including detailed comments about the nature and extent of the changes. Include the date and originator of the modifications.
+
+ 2.4. **Branding Attribution.**
+ You must not remove or alter any HelpingAI branding, logos, or notices included in the Content without explicit prior consent from HelpingAI.
+
+ 2.5. **Disclaimer of Warranty.**
+ The Content is provided "AS IS," without any implied warranties, including but not limited to warranties of merchantability, fitness for a particular purpose, and non-infringement.
+
+ 2.6. **Limitation of Liability.**
+ To the maximum extent permitted by law, neither HelpingAI nor any contributor shall be liable for any loss, personal injury, property damage, or any indirect, special, incidental, or consequential damages arising from or related to the use of the Content.
+
+ 2.7. **Governing Law.**
+ This HelpingAI License shall be governed and construed in accordance with the laws of the jurisdiction where HelpingAI primarily operates.
+
+ ### Definitions
+
+ 3.1. **"Source Code"** refers to the preferred form for making modifications to the Content, typically represented by human-readable programming languages, scripts, or documentation formats.
+
+ 3.2. **"Binaries"** refers to compiled forms of the Source Code, such as executables, libraries, or similar artifacts produced from the Source Code.
webscout-3.3.dist-info/METADATA CHANGED
@@ -1,10 +1,10 @@
  Metadata-Version: 2.1
  Name: webscout
- Version: 3.1b0
+ Version: 3.3
  Summary: Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs
  Author: OEvortex
  Author-email: helpingai5@gmail.com
- License: HelpingAI Simplified Universal License
+ License: HelpingAI
  Project-URL: Documentation, https://github.com/OE-LUCIFER/Webscout/wiki
  Project-URL: Source, https://github.com/OE-LUCIFER/Webscout
  Project-URL: Tracker, https://github.com/OE-LUCIFER/Webscout/issues
@@ -140,9 +140,11 @@ Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can
  - [14. `chatgptuk` - Chat with gemini-pro](#14-chatgptuk---chat-with-gemini-pro)
  - [15. `poe`- chat with poe](#15-poe--chat-with-poe)
  - [16. `BasedGPT` - chat with GPT](#16-basedgpt---chat-with-gpt)
+ - [17. `DeepSeek` -chat with deepseek](#17-deepseek--chat-with-deepseek)
  - [`LLM`](#llm)
  - [`Local-LLM` webscout can now run GGUF models](#local-llm-webscout-can-now-run-gguf-models)
  - [`Function-calling-local-llm`](#function-calling-local-llm)
+ - [`Local-rawdog`](#local-rawdog)
  - [`LLM` with internet](#llm-with-internet)
  - [LLM with deepwebs](#llm-with-deepwebs)
  - [`Webai` - terminal gpt and a open interpeter](#webai---terminal-gpt-and-a-open-interpeter)
@@ -1218,6 +1220,39 @@ Usage code similar to other proviers
  
  ### 16. `BasedGPT` - chat with GPT
  Usage code similar to other providers
+
+ ### 17. `DeepSeek` -chat with deepseek
+ ```python
+ from webscout import DeepSeek
+ from rich import print
+
+ ai = DeepSeek(
+     is_conversation=True,
+     api_key='', # Watch this video https://youtu.be/Euin6p5Ryks?si=-84JBtyqGwMzvdIq to know from where u can get this key for free
+     max_tokens=800,
+     timeout=30,
+     intro=None,
+     filepath=None,
+     update_file=True,
+     proxies={},
+     history_offset=10250,
+     act=None,
+     model="deepseek_chat"
+ )
+
+ # Start an infinite loop for continuous interaction
+ while True:
+     # Define a prompt to send to the AI
+     prompt = input("Enter your prompt: ")
+
+     # Check if the user wants to exit the loop
+     if prompt.lower() == "exit":
+         break
+
+     # Use the 'chat' method to send the prompt and receive a response
+     r = ai.chat(prompt)
+     print(r)
+ ```
  ### `LLM`
  ```python
  from webscout.LLM import LLM
@@ -1245,6 +1280,7 @@ while True:
      print("AI: ", response)
  ```
  ### `Local-LLM` webscout can now run GGUF models
+ Local LLM's some functions are taken from easy-llama
  ```python
  from webscout.Local.utils import download_model
  from webscout.Local.model import Model
@@ -1336,6 +1372,93 @@ while True:
      response = thread.send(user_input)
      print("Bot: ", response)
  ```
+ ### `Local-rawdog`
+ ```python
+ import webscout.Local as ws
+ from webscout.Local.rawdog import RawDog
+ from webscout.Local.samplers import DefaultSampling
+ from webscout.Local.formats import chatml, AdvancedFormat
+ from webscout.Local.utils import download_model
+ import datetime
+ import sys
+ import os
+
+ repo_id = "YorkieOH10/granite-8b-code-instruct-Q8_0-GGUF"
+ filename = "granite-8b-code-instruct.Q8_0.gguf"
+ model_path = download_model(repo_id, filename, token='')
+
+ # Load the model using the downloaded path
+ model = ws.Model(model_path, n_gpu_layers=10)
+
+ rawdog = RawDog()
+
+ # Create an AdvancedFormat and modify the system content
+ # Use a lambda to generate the prompt dynamically:
+ chat_format = AdvancedFormat(chatml)
+ # **Pre-format the intro_prompt string:**
+ system_content = f"""
+ You are a command-line coding assistant called Rawdog that generates and auto-executes Python scripts.
+
+ A typical interaction goes like this:
+ 1. The user gives you a natural language PROMPT.
+ 2. You:
+     i. Determine what needs to be done
+     ii. Write a short Python SCRIPT to do it
+     iii. Communicate back to the user by printing to the console in that SCRIPT
+ 3. The compiler extracts the script and then runs it using exec(). If there will be an exception raised,
+ it will be send back to you starting with "PREVIOUS SCRIPT EXCEPTION:".
+ 4. In case of exception, regenerate error free script.
+
+ If you need to review script outputs before completing the task, you can print the word "CONTINUE" at the end of your SCRIPT.
+ This can be useful for summarizing documents or technical readouts, reading instructions before
+ deciding what to do, or other tasks that require multi-step reasoning.
+ A typical 'CONTINUE' interaction looks like this:
+ 1. The user gives you a natural language PROMPT.
+ 2. You:
+     i. Determine what needs to be done
+     ii. Determine that you need to see the output of some subprocess call to complete the task
+     iii. Write a short Python SCRIPT to print that and then print the word "CONTINUE"
+ 3. The compiler
+     i. Checks and runs your SCRIPT
+     ii. Captures the output and appends it to the conversation as "LAST SCRIPT OUTPUT:"
+     iii. Finds the word "CONTINUE" and sends control back to you
+ 4. You again:
+     i. Look at the original PROMPT + the "LAST SCRIPT OUTPUT:" to determine what needs to be done
+     ii. Write a short Python SCRIPT to do it
+     iii. Communicate back to the user by printing to the console in that SCRIPT
+ 5. The compiler...
+
+ Please follow these conventions carefully:
+ - Decline any tasks that seem dangerous, irreversible, or that you don't understand.
+ - Always review the full conversation prior to answering and maintain continuity.
+ - If asked for information, just print the information clearly and concisely.
+ - If asked to do something, print a concise summary of what you've done as confirmation.
+ - If asked a question, respond in a friendly, conversational way. Use programmatically-generated and natural language responses as appropriate.
+ - If you need clarification, return a SCRIPT that prints your question. In the next interaction, continue based on the user's response.
+ - Assume the user would like something concise. For example rather than printing a massive table, filter or summarize it to what's likely of interest.
+ - Actively clean up any temporary processes or files you use.
+ - When looking through files, use git as available to skip files, and skip hidden files (.env, .git, etc) by default.
+ - You can plot anything with matplotlib.
+ - ALWAYS Return your SCRIPT inside of a single pair of ``` delimiters. Only the console output of the first such SCRIPT is visible to the user, so make sure that it's complete and don't bother returning anything else.
+ """
+ chat_format.override('system_content', lambda: system_content)
+
+ thread = ws.Thread(model, format=chat_format, sampler=DefaultSampling)
+
+ while True:
+     prompt = input(">: ")
+     if prompt.lower() == "q":
+         break
+
+     response = thread.send(prompt)
+
+     # Process the response using RawDog
+     script_output = rawdog.main(response)
+
+     if script_output:
+         print(script_output)
+
+ ```
  ### `LLM` with internet
  ```python
  from __future__ import annotations
webscout-3.3.dist-info/RECORD CHANGED
@@ -12,10 +12,10 @@ DeepWEBS/utilsdw/enver.py,sha256=vpI7s4_o_VL9govSryOv-z1zYK3pTEW3-H9QNN8JYtc,247
  DeepWEBS/utilsdw/logger.py,sha256=Z0nFUcEGyU8r28yKiIyvEtO26xxpmJgbvNToTfwZecc,8174
  webscout/AIauto.py,sha256=xPGr_Z0h27XXNh4Wiufjn9TksDOqxqlaGcLUYKNP55w,18246
  webscout/AIbase.py,sha256=GoHbN8r0gq2saYRZv6LA-Fr9Jlcjv80STKFXUq2ZeGU,4710
- webscout/AIutel.py,sha256=5-Is9e-COeh0NX9wkugdctHdzrsjBVZ7lfl2aunt1YI,33272
+ webscout/AIutel.py,sha256=Ghe9w1gqnCZTwjc3NzXymSamY3nP0zEep4NiATW32Qk,33454
  webscout/DWEBS.py,sha256=QT-7-dUgWhQ_H7EVZD53AVyXxyskoPMKCkFIpzkN56Q,7332
  webscout/LLM.py,sha256=LbGCZdJf8A5dwfoGS4tyy39tAh5BDdhMZP0ScKaaQfU,4184
- webscout/__init__.py,sha256=eqHBfAE3psYEi42ZXnbwZG2y3J23F9XZjhoAI0nOKlQ,1856
+ webscout/__init__.py,sha256=pOqM5UGmljQN4jTrL3kyrjMv66VuTPyzfNlXZra9KLQ,1872
  webscout/__main__.py,sha256=ZtTRgsRjUi2JOvYFLF1ZCh55Sdoz94I-BS-TlJC7WDU,126
  webscout/async_providers.py,sha256=holBv5SxanxVXc_92CBBaXHlB2IakB_fHnhyZaFjYF8,684
  webscout/cli.py,sha256=174iWc0NxwfYMq9vyIk_NNnd3Q8bkzEiCa_BE6a0WZY,18743
@@ -25,16 +25,16 @@ webscout/models.py,sha256=5iQIdtedT18YuTZ3npoG7kLMwcrKwhQ7928dl_7qZW0,692
  webscout/tempid.py,sha256=5oc3UbXhPGKxrMRTfRABT-V-dNzH_hOKWtLYM6iCWd4,5896
  webscout/transcriber.py,sha256=EddvTSq7dPJ42V3pQVnGuEiYQ7WjJ9uyeR9kMSxN7uY,20622
  webscout/utils.py,sha256=CxeXvp0rWIulUrEaPZMaNfg_tSuQLRSV8uuHA2chyKE,2603
- webscout/version.py,sha256=oUZS6cJqcn6yjv-XNLbwyvxLWIalOba-cFuLM6eoaWU,23
+ webscout/version.py,sha256=ZXcHuaEBaIH89v_gIvqcKVULdIIv9r489XLDrqSqDUo,23
  webscout/voice.py,sha256=0QjXTHAQmCK07IDZXRc7JXem47cnPJH7u3X0sVP1-UQ,967
- webscout/webai.py,sha256=GqJs_4KSas9xOvEZ7cDAwo88OVsPoJnJWmnZ68qRQ0g,85324
+ webscout/webai.py,sha256=hnRfUI9AT3MgltP68bAmW5Tq4_aWcYytYeTFEsgS7u0,85991
  webscout/webscout_search.py,sha256=8tDmlskNtIUAM41dqIc387ufC1YunovTm6w5NqeM_yQ,42650
  webscout/webscout_search_async.py,sha256=ecn9b0J6YtAxMER80iUF1cgn_eh3Ysj7jFpievJzDbE,14471
- webscout/Local/__init__.py,sha256=0yXXihFek7VCugUjjCI67i3yZ_PQ8mw3MMVlWGpMmLM,217
- webscout/Local/_version.py,sha256=_4faCzosNaazujtNZJP12bI38sKMaj4KxGdcGvcGPdY,83
+ webscout/Local/__init__.py,sha256=RN6klpbabPGNX2YzPm_hdeUcQvieUwvJt22uAO2RKSM,238
+ webscout/Local/_version.py,sha256=_K1dEsde19FCHSmC1pCe9Ut_PzboJoV9w3ZMY835kqQ,83
  webscout/Local/formats.py,sha256=BiZZSoN3e8S6-S-ykBL9ogSUs0vK11GaZ3ghc9U8GRk,18994
  webscout/Local/model.py,sha256=T_bzNNrxEyOyLyhp6fKwiuVBBkXC2a37LzJVCxFIxOU,30710
- webscout/Local/rawdog.py,sha256=R7tdsHG8e4n96eODx1z3EArLjKnco098N61Zmp4Xb_8,35595
+ webscout/Local/rawdog.py,sha256=LtA7bck2HyvWmovuaG86Iiquiz7XiMcxBlebo9IuGBY,35744
  webscout/Local/samplers.py,sha256=qXwU4eLXER-2aCYzcJcTgA6BeFmi5GMpTDUX1C9pTN4,4372
  webscout/Local/thread.py,sha256=Lyf_N2CaGAn2usSWSiUXLPAgpWub8vUu_tgFgtnvZVA,27408
  webscout/Local/utils.py,sha256=CSt9IqHhVGk_nJEnKvSFbLhC5nNf01e0MtwpgMmF9pA,6197
@@ -43,6 +43,7 @@ webscout/Provider/Berlin4h.py,sha256=zMpmWmdFCbcE3UWB-F9xbbTWZTfx4GnjnRf6sDoaiC0
  webscout/Provider/Blackboxai.py,sha256=HUk0moEGsgGvidD1LF9tbfaKdx7bPnGU_SrYPdcfHU8,17182
  webscout/Provider/ChatGPTUK.py,sha256=qmuCb_a71GNE5LelOb5AKJUBndvj7soebiNey4VdDvE,8570
  webscout/Provider/Cohere.py,sha256=IXnRosYOaMAA65nvsKmN6ZkJGSdZFYQYBidzuNaCqX8,8711
+ webscout/Provider/Deepseek.py,sha256=HKsC-ePLSPqcrQbafy-IzR0BNqId3LfiIEhk9j9oTs4,10285
  webscout/Provider/Gemini.py,sha256=_4DHWvlWuNAmVHPwHB1RjmryjTZZCthLa6lvPEHLvkQ,8451
  webscout/Provider/Groq.py,sha256=QfgP3hKUcqq5vUA4Pzuu3HAgpJkKwLWNjjsnxtkCYd8,21094
  webscout/Provider/Koboldai.py,sha256=KwWx2yPlvT9BGx37iNvSbgzWkJ9I8kSOmeg7sL1hb0M,15806
@@ -58,10 +59,10 @@ webscout/Provider/ThinkAnyAI.py,sha256=_qFjj0djxxrranyEY33w14oizyRjzlVwMv_hzvVtw
  webscout/Provider/Xjai.py,sha256=BIlk2ouz9Kh_0Gg9hPvTqhI7XtcmWdg5vHSX_4uGrIs,9039
  webscout/Provider/Yepchat.py,sha256=2Eit-A7w1ph1GQKNQuur_yaDzI64r0yBGxCIjDefJxQ,19875
  webscout/Provider/Youchat.py,sha256=UVGBuGSjv4uRibn1xflmCjYcfrRTKnDvX3adhag6T98,7976
- webscout/Provider/__init__.py,sha256=iUgo6NHk8i5i4l4eauq6VXgOlWO_V6Q2FwhnHS64lFA,1457
- webscout-3.1b0.dist-info/LICENSE.md,sha256=mRVwJuT4SXC5O93BFdsfWBjlXjGn2Np90Zm5SocUzM0,3150
- webscout-3.1b0.dist-info/METADATA,sha256=GckFWn6y4L7GkweHbcgJMWoF4qiaKjozKeQXbEZ5cJk,62007
- webscout-3.1b0.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
- webscout-3.1b0.dist-info/entry_points.txt,sha256=Hh4YIIjvkqB9SVxZ2ri4DZUkgEu_WF_5_r_nZDIvfG8,73
- webscout-3.1b0.dist-info/top_level.txt,sha256=OD5YKy6Y3hldL7SmuxsiEDxAG4LgdSSWwzYk22MF9fk,18
- webscout-3.1b0.dist-info/RECORD,,
+ webscout/Provider/__init__.py,sha256=lxvdW9dUk7rSjZTUaI4PrfTMGEwktJ_c82iyg-n9LIk,1506
+ webscout-3.3.dist-info/LICENSE.md,sha256=9P0imsudI7MEvZe2pOcg8rKBn6E5FGHQ-riYozZI-Bk,2942
+ webscout-3.3.dist-info/METADATA,sha256=X8VHG0hDnfdEOxYBPJ5qOLiq7ufjdJPCrLagDBFF8qQ,67184
+ webscout-3.3.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+ webscout-3.3.dist-info/entry_points.txt,sha256=Hh4YIIjvkqB9SVxZ2ri4DZUkgEu_WF_5_r_nZDIvfG8,73
+ webscout-3.3.dist-info/top_level.txt,sha256=OD5YKy6Y3hldL7SmuxsiEDxAG4LgdSSWwzYk22MF9fk,18
+ webscout-3.3.dist-info/RECORD,,
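
For readers comparing the RECORD hashes above: each entry has the form `path,sha256=<digest>,<size>`, where the digest is the unpadded URL-safe base64 encoding of the file's SHA-256, per the wheel spec. A small sketch for recomputing one entry locally:

```python
import base64
import hashlib

def record_digest(path: str) -> str:
    """Return the unpadded urlsafe-base64 SHA-256 digest used in wheel RECORD files."""
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# e.g. compare against:
#   webscout/version.py,sha256=ZXcHuaEBaIH89v_gIvqcKVULdIIv9r489XLDrqSqDUo,23
print(record_digest("webscout/version.py"))
```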
webscout-3.1b0.dist-info/LICENSE.md REMOVED
@@ -1,50 +0,0 @@
- ****************************************
- **** HelpingAI Simplified Universal License ****
- ****************************************
-
- Version 1.0
-
- ### Introduction
-
- This HelpingAI Simplified Universal License (HSUL) governs HelpingAI's content, including computer programs, scripts, datasets, documents, images, audio recordings, videos, and other digital assets. The HSUL provides simple, universal terms for accessing, modifying, and sharing resources while embracing ethical development practices.
-
- ### Grant of Rights
-
- Under the HSUL, HelpingAI authorizes you to copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Content, subject to the terms and conditions detailed in this document.
-
- ### Conditions
-
- To qualify for the rights granted in section 1, you must strictly adhere to the following conditions:
-
- 2.1. **Redistributions of Source Code.**
- If you redistribute the Source Code, you must include the entire HSUL with your distribution. Furthermore, you must add prominent notifications in all affected files stating:
-
- > "This Work is released under the HelpingAI Simplified Universal License v1.0."
-
- 2.2. **Binary Form Redistributions.**
- If you distribute Binaries generated from the Source Code, you must ensure the inclusion of the following statement in your distribution:
-
- > "This Work is based upon the HelpingAI Simplified Universally Licensed Work, under the HelpingAI Simplified Universal License v1.0."
-
- 2.3. **Notification of Changes.**
- Clearly indicate any alterations you introduce to the Source Code or Documentation via prominent comments detailing the nature and scope of the change(s). Reference the date and originator of the modifications.
-
- 2.4. **Branding Attribution.**
- Do not remove or alter any HelpingAI branding, logos, or notices included in the Content without explicit prior consent from HelpingAI.
-
- 2.5. **Exclusion of Warranty.**
- The Content is delivered "AS IS," bereft of any implicit guarantee, including — though not constrained to — warranties pertaining to marketability, applicability for a particular purpose, and non-infringement.
-
- 2.6. **Limitation of Liability.**
- To the maximum extent allowed by law, neither HelpingAI nor any contributor shall bear responsibility for any loss, personal injury, property damage, indirect, special, incidental, or consequential damages stemming from or relating to the Content or its employment.
-
- 2.7. **Governing Law.**
- This HSUL shall be managed and construed according to the laws of the jurisdiction where HelpingAI primarily operates.
-
- ### Definitions
-
- 3.1. **"Source Code"** signifies the preferred form for editing the Content, typically represented by human-readable programming languages, scripts, or documentation formats.
-
- 3.2. **"Binaries"** denote compiled forms of the Source Code, executables, libraries, or similar artifacts built from the Source Code.
-
- By leveraging this Content, you confirm your approval of the HSUL and pledge to honor its terms and conditions. If you disagree with the HSUL's rules, refrain from engaging with the Content.