webscout 3.1b0__tar.gz → 3.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout has been flagged as possibly problematic.
- webscout-3.3/LICENSE.md +50 -0
- {webscout-3.1b0 → webscout-3.3}/PKG-INFO +125 -2
- {webscout-3.1b0 → webscout-3.3}/README.md +123 -0
- {webscout-3.1b0 → webscout-3.3}/setup.py +2 -2
- {webscout-3.1b0 → webscout-3.3}/webscout/AIutel.py +5 -3
- {webscout-3.1b0 → webscout-3.3}/webscout/Local/__init__.py +1 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/Local/_version.py +1 -1
- {webscout-3.1b0 → webscout-3.3}/webscout/Local/rawdog.py +8 -10
- webscout-3.3/webscout/Provider/Deepseek.py +266 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/Provider/__init__.py +2 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/__init__.py +1 -0
- webscout-3.3/webscout/version.py +2 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/webai.py +16 -0
- {webscout-3.1b0 → webscout-3.3}/webscout.egg-info/PKG-INFO +125 -2
- {webscout-3.1b0 → webscout-3.3}/webscout.egg-info/SOURCES.txt +1 -0
- webscout-3.1b0/LICENSE.md +0 -50
- webscout-3.1b0/webscout/version.py +0 -2
- {webscout-3.1b0 → webscout-3.3}/DeepWEBS/__init__.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/DeepWEBS/documents/__init__.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/DeepWEBS/documents/query_results_extractor.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/DeepWEBS/documents/webpage_content_extractor.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/DeepWEBS/networks/__init__.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/DeepWEBS/networks/filepath_converter.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/DeepWEBS/networks/google_searcher.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/DeepWEBS/networks/network_configs.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/DeepWEBS/networks/webpage_fetcher.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/DeepWEBS/utilsdw/__init__.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/DeepWEBS/utilsdw/enver.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/DeepWEBS/utilsdw/logger.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/setup.cfg +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/AIauto.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/AIbase.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/DWEBS.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/LLM.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/Local/formats.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/Local/model.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/Local/samplers.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/Local/thread.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/Local/utils.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/Provider/BasedGPT.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/Provider/Berlin4h.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/Provider/Blackboxai.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/Provider/ChatGPTUK.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/Provider/Cohere.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/Provider/Gemini.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/Provider/Groq.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/Provider/Koboldai.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/Provider/Leo.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/Provider/Llama2.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/Provider/OpenGPT.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/Provider/Openai.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/Provider/Perplexity.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/Provider/Phind.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/Provider/Poe.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/Provider/Reka.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/Provider/ThinkAnyAI.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/Provider/Xjai.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/Provider/Yepchat.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/Provider/Youchat.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/__main__.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/async_providers.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/cli.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/exceptions.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/g4f.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/models.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/tempid.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/transcriber.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/utils.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/voice.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/webscout_search.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout/webscout_search_async.py +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout.egg-info/dependency_links.txt +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout.egg-info/entry_points.txt +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout.egg-info/requires.txt +0 -0
- {webscout-3.1b0 → webscout-3.3}/webscout.egg-info/top_level.txt +0 -0
webscout-3.3/LICENSE.md
ADDED
@@ -0,0 +1,50 @@
+************************************************
+****            HelpingAI License          ****
+************************************************
+
+Version 2.0
+
+Developed by Abhay Koul
+
+### Preamble
+
+The HelpingAI License governs the use of HelpingAI's digital assets, including but not limited to software, scripts, datasets, documents, images, audio recordings, videos. The HelpingAI License aims to provide clear, comprehensive terms for accessing, modifying, and sharing resources, while promoting ethical development practices.
+
+### Grant of Rights
+
+Under the HelpingAI License, HelpingAI grants you the rights to copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Content, provided you comply with the terms and conditions outlined in this document.
+
+### Terms and Conditions
+
+To exercise the rights granted in the previous section, you must adhere to the following terms and conditions:
+
+2.1. **Redistribution of Source Code.**
+If you redistribute the Source Code, you must include the complete HelpingAI License with your distribution. You must also add clear notifications in all modified files stating:
+
+> "This Work is released under the HelpingAI License v2.0."
+
+2.2. **Distribution in Binary Form.**
+If you distribute Binaries derived from the Source Code, you must include the following statement in your distribution:
+
+> "This Work is based on the HelpingAI Licensed Work, under the HelpingAI License v2.0."
+
+2.3. **Notification of Changes.**
+You must clearly indicate any modifications you make to the Source Code or Documentation, including detailed comments about the nature and extent of the changes. Include the date and originator of the modifications.
+
+2.4. **Branding Attribution.**
+You must not remove or alter any HelpingAI branding, logos, or notices included in the Content without explicit prior consent from HelpingAI.
+
+2.5. **Disclaimer of Warranty.**
+The Content is provided "AS IS," without any implied warranties, including but not limited to warranties of merchantability, fitness for a particular purpose, and non-infringement.
+
+2.6. **Limitation of Liability.**
+To the maximum extent permitted by law, neither HelpingAI nor any contributor shall be liable for any loss, personal injury, property damage, or any indirect, special, incidental, or consequential damages arising from or related to the use of the Content.
+
+2.7. **Governing Law.**
+This HelpingAI License shall be governed and construed in accordance with the laws of the jurisdiction where HelpingAI primarily operates.
+
+### Definitions
+
+3.1. **"Source Code"** refers to the preferred form for making modifications to the Content, typically represented by human-readable programming languages, scripts, or documentation formats.
+
+3.2. **"Binaries"** refers to compiled forms of the Source Code, such as executables, libraries, or similar artifacts produced from the Source Code.
{webscout-3.1b0 → webscout-3.3}/PKG-INFO
@@ -1,10 +1,10 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 3.1b0
+Version: 3.3
 Summary: Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs
 Author: OEvortex
 Author-email: helpingai5@gmail.com
-License: HelpingAI
+License: HelpingAI
 Project-URL: Documentation, https://github.com/OE-LUCIFER/Webscout/wiki
 Project-URL: Source, https://github.com/OE-LUCIFER/Webscout
 Project-URL: Tracker, https://github.com/OE-LUCIFER/Webscout/issues
@@ -140,9 +140,11 @@ Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can
 - [14. `chatgptuk` - Chat with gemini-pro](#14-chatgptuk---chat-with-gemini-pro)
 - [15. `poe`- chat with poe](#15-poe--chat-with-poe)
 - [16. `BasedGPT` - chat with GPT](#16-basedgpt---chat-with-gpt)
+- [17. `DeepSeek` -chat with deepseek](#17-deepseek--chat-with-deepseek)
 - [`LLM`](#llm)
 - [`Local-LLM` webscout can now run GGUF models](#local-llm-webscout-can-now-run-gguf-models)
 - [`Function-calling-local-llm`](#function-calling-local-llm)
+- [`Local-rawdog`](#local-rawdog)
 - [`LLM` with internet](#llm-with-internet)
 - [LLM with deepwebs](#llm-with-deepwebs)
 - [`Webai` - terminal gpt and a open interpeter](#webai---terminal-gpt-and-a-open-interpeter)
@@ -1218,6 +1220,39 @@ Usage code similar to other proviers
 
 ### 16. `BasedGPT` - chat with GPT
 Usage code similar to other providers
+
+### 17. `DeepSeek` -chat with deepseek
+```python
+from webscout import DeepSeek
+from rich import print
+
+ai = DeepSeek(
+    is_conversation=True,
+    api_key='',  # Watch this video https://youtu.be/Euin6p5Ryks?si=-84JBtyqGwMzvdIq to know from where u can get this key for free
+    max_tokens=800,
+    timeout=30,
+    intro=None,
+    filepath=None,
+    update_file=True,
+    proxies={},
+    history_offset=10250,
+    act=None,
+    model="deepseek_chat"
+)
+
+# Start an infinite loop for continuous interaction
+while True:
+    # Define a prompt to send to the AI
+    prompt = input("Enter your prompt: ")
+
+    # Check if the user wants to exit the loop
+    if prompt.lower() == "exit":
+        break
+
+    # Use the 'chat' method to send the prompt and receive a response
+    r = ai.chat(prompt)
+    print(r)
+```
 ### `LLM`
 ```python
 from webscout.LLM import LLM
@@ -1245,6 +1280,7 @@ while True:
     print("AI: ", response)
 ```
 ### `Local-LLM` webscout can now run GGUF models
+Local LLM's some functions are taken from easy-llama
 ```python
 from webscout.Local.utils import download_model
 from webscout.Local.model import Model
@@ -1336,6 +1372,93 @@ while True:
     response = thread.send(user_input)
     print("Bot: ", response)
 ```
+### `Local-rawdog`
+```python
+import webscout.Local as ws
+from webscout.Local.rawdog import RawDog
+from webscout.Local.samplers import DefaultSampling
+from webscout.Local.formats import chatml, AdvancedFormat
+from webscout.Local.utils import download_model
+import datetime
+import sys
+import os
+
+repo_id = "YorkieOH10/granite-8b-code-instruct-Q8_0-GGUF"
+filename = "granite-8b-code-instruct.Q8_0.gguf"
+model_path = download_model(repo_id, filename, token='')
+
+# Load the model using the downloaded path
+model = ws.Model(model_path, n_gpu_layers=10)
+
+rawdog = RawDog()
+
+# Create an AdvancedFormat and modify the system content
+# Use a lambda to generate the prompt dynamically:
+chat_format = AdvancedFormat(chatml)
+# **Pre-format the intro_prompt string:**
+system_content = f"""
+You are a command-line coding assistant called Rawdog that generates and auto-executes Python scripts.
+
+A typical interaction goes like this:
+1. The user gives you a natural language PROMPT.
+2. You:
+    i. Determine what needs to be done
+    ii. Write a short Python SCRIPT to do it
+    iii. Communicate back to the user by printing to the console in that SCRIPT
+3. The compiler extracts the script and then runs it using exec(). If there will be an exception raised,
+ it will be send back to you starting with "PREVIOUS SCRIPT EXCEPTION:".
+4. In case of exception, regenerate error free script.
+
+If you need to review script outputs before completing the task, you can print the word "CONTINUE" at the end of your SCRIPT.
+This can be useful for summarizing documents or technical readouts, reading instructions before
+deciding what to do, or other tasks that require multi-step reasoning.
+A typical 'CONTINUE' interaction looks like this:
+1. The user gives you a natural language PROMPT.
+2. You:
+    i. Determine what needs to be done
+    ii. Determine that you need to see the output of some subprocess call to complete the task
+    iii. Write a short Python SCRIPT to print that and then print the word "CONTINUE"
+3. The compiler
+    i. Checks and runs your SCRIPT
+    ii. Captures the output and appends it to the conversation as "LAST SCRIPT OUTPUT:"
+    iii. Finds the word "CONTINUE" and sends control back to you
+4. You again:
+    i. Look at the original PROMPT + the "LAST SCRIPT OUTPUT:" to determine what needs to be done
+    ii. Write a short Python SCRIPT to do it
+    iii. Communicate back to the user by printing to the console in that SCRIPT
+5. The compiler...
+
+Please follow these conventions carefully:
+- Decline any tasks that seem dangerous, irreversible, or that you don't understand.
+- Always review the full conversation prior to answering and maintain continuity.
+- If asked for information, just print the information clearly and concisely.
+- If asked to do something, print a concise summary of what you've done as confirmation.
+- If asked a question, respond in a friendly, conversational way. Use programmatically-generated and natural language responses as appropriate.
+- If you need clarification, return a SCRIPT that prints your question. In the next interaction, continue based on the user's response.
+- Assume the user would like something concise. For example rather than printing a massive table, filter or summarize it to what's likely of interest.
+- Actively clean up any temporary processes or files you use.
+- When looking through files, use git as available to skip files, and skip hidden files (.env, .git, etc) by default.
+- You can plot anything with matplotlib.
+- ALWAYS Return your SCRIPT inside of a single pair of ``` delimiters. Only the console output of the first such SCRIPT is visible to the user, so make sure that it's complete and don't bother returning anything else.
+"""
+chat_format.override('system_content', lambda: system_content)
+
+thread = ws.Thread(model, format=chat_format, sampler=DefaultSampling)
+
+while True:
+    prompt = input(">: ")
+    if prompt.lower() == "q":
+        break
+
+    response = thread.send(prompt)
+
+    # Process the response using RawDog
+    script_output = rawdog.main(response)
+
+    if script_output:
+        print(script_output)
+
+```
 ### `LLM` with internet
 ```python
 from __future__ import annotations
{webscout-3.1b0 → webscout-3.3}/README.md
@@ -75,9 +75,11 @@
@@ -1153,6 +1155,39 @@
@@ -1180,6 +1215,7 @@
@@ -1271,6 +1307,93 @@
These four hunks are identical in content to the PKG-INFO hunks above (PKG-INFO embeds the README as its long description); only the line offsets differ.
{webscout-3.1b0 → webscout-3.3}/setup.py
@@ -5,7 +5,7 @@ with open("README.md", encoding="utf-8") as f:
 
 setup(
     name="webscout",
-    version="3.1b0",
+    version="3.3",
     description="Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs",
     long_description=README,
     long_description_content_type="text/markdown",
@@ -77,7 +77,7 @@ setup(
             'huggingface_hub',
         ],
     },
-    license="HelpingAI
+    license="HelpingAI",
    project_urls={
        "Documentation": "https://github.com/OE-LUCIFER/Webscout/wiki",
        "Source": "https://github.com/OE-LUCIFER/Webscout",
{webscout-3.1b0 → webscout-3.3}/webscout/AIutel.py
@@ -47,6 +47,7 @@ webai = [
     "auto",
     "poe",
     "basedgpt",
+    "deepseek",
 ]
 gpt4free_providers = [
     provider.__name__ for provider in g4f.Provider.__providers__  # if provider.working
@@ -213,12 +214,12 @@ class Conversation:
             ), f"File '{filepath}' does not exist"
         if not os.path.isfile(filepath):
             logging.debug(f"Creating new chat-history file - '{filepath}'")
-            with open(filepath, "w") as fh:  # Try creating new file
+            with open(filepath, "w", encoding='utf-8') as fh:  # Try creating new file
                 # lets add intro here
                 fh.write(self.intro)
         else:
             logging.debug(f"Loading conversation from '{filepath}'")
-            with open(filepath) as fh:
+            with open(filepath, encoding='utf-8') as fh:
                 file_contents = fh.read()
                 # Presume intro prompt is part of the file content
                 self.chat_history = file_contents
@@ -269,7 +270,7 @@ class Conversation:
             return
         new_history = self.history_format % dict(user=prompt, llm=response)
         if self.file and self.update_file:
-            with open(self.file, "a") as fh:
+            with open(self.file, "a", encoding='utf-8') as fh:
                 fh.write(new_history)
         self.chat_history += new_history
 
@@ -540,6 +541,7 @@ print("The essay is about...")
     """
     if not quiet:
         print(
+            "Rawdog is an experimental tool that generates and auto-executes Python scripts in the cli.\n"
            "To get the most out of Rawdog. Ensure the following are installed:\n"
            " 1. Python 3.x\n"
            " 2. Dependency:\n"
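The `encoding='utf-8'` arguments added above fix a real portability issue: without an explicit encoding, Python's `open()` uses the platform's locale encoding (for example cp1252 on Windows), so chat histories containing non-ASCII text could fail to round-trip. A minimal illustration of the behavior being guarded against, independent of webscout:

```python
# Illustrative sketch: why AIutel.py now passes encoding='utf-8' explicitly.
# Without it, open() falls back to locale.getpreferredencoding(), which is not
# UTF-8 everywhere, and writing non-ASCII history can raise UnicodeEncodeError.
history = "User: ¿Qué tal? Reviewing a résumé...\n"

with open("chat-history.txt", "w", encoding="utf-8") as fh:
    fh.write(history)  # portable regardless of the platform default

with open("chat-history.txt", encoding="utf-8") as fh:
    assert fh.read() == history  # round-trips cleanly
```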
{webscout-3.1b0 → webscout-3.3}/webscout/Local/rawdog.py
@@ -21,18 +21,16 @@ import logging
 import appdirs
 import datetime
 import re
-from .model
-from .utils
-    RESET_ALL,
-    _SupportsWriteAndFlush,
-    cls,
-    print_verbose,
-    truncate,
-    run_system_command
-)
+from .model import Model, assert_model_is_loaded, _SupportsWriteAndFlush
+from .utils import RESET_ALL, cls, print_verbose, truncate
 from .samplers import SamplerSettings, DefaultSampling
-from
+from typing import Optional, Literal, Union
+from .formats import AdvancedFormat
+
 from .formats import blank as formats_blank
+from ..AIutel import *
+from .samplers import SamplerSettings, DefaultSampling
+from .formats import AdvancedFormat
 from rich.markdown import Markdown
 from rich.console import Console
 appdir = appdirs.AppDirs("AIWEBS", "vortex")
webscout-3.3/webscout/Provider/Deepseek.py
ADDED
@@ -0,0 +1,266 @@
+import time
+import uuid
+from selenium import webdriver
+from selenium.webdriver.chrome.options import Options
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.ui import WebDriverWait
+import click
+import requests
+from requests import get
+from uuid import uuid4
+from re import findall
+from requests.exceptions import RequestException
+from curl_cffi.requests import get, RequestsError
+import g4f
+from random import randint
+from PIL import Image
+import io
+import re
+import json
+import yaml
+from ..AIutel import Optimizers
+from ..AIutel import Conversation
+from ..AIutel import AwesomePrompts, sanitize_stream
+from ..AIbase import Provider, AsyncProvider
+from Helpingai_T2 import Perplexity
+from webscout import exceptions
+from typing import Any, AsyncGenerator, Dict, Optional
+import logging
+import httpx
+import os
+from dotenv import load_dotenv; load_dotenv()
+
+#-----------------------------------------------DeepSeek--------------------------------------------
+class DeepSeek(Provider):
+    def __init__(
+        self,
+        api_key: str,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = 'deepseek_chat',
+        temperature: float = 1.0,
+    ):
+        """Initializes DeepSeek
+
+        Args:
+            api_key (str): DeepSeek API key.
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            model_type (str, optional): DeepSeek model type. Defaults to 'deepseek_chat'.
+            temperature (float, optional): Creativity level of the response. Defaults to 1.0.
+        """
+        self.api_token = api_key
+        self.auth_headers = {
+            'Authorization': f'Bearer {self.api_token}'
+        }
+        self.api_base_url = 'https://chat.deepseek.com/api/v0/chat'
+        self.api_session = requests.Session()
+        self.api_session.headers.update(self.auth_headers)
+
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.model_type = model
+        self.temperature = temperature
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        # self.session.proxies = proxies
+
+    def clear_chat(self) -> None:
+        """
+        Clears the chat context by making a POST request to the clear_context endpoint.
+        """
+        clear_payload = {"model_class": "deepseek_chat", "append_welcome_message": False}
+        clear_response = self.api_session.post(f'{self.api_base_url}/clear_context', json=clear_payload)
+        clear_response.raise_for_status()  # Raises an HTTPError if the HTTP request returned an unsuccessful status code
+
+    def generate(self, user_message: str, response_temperature: float = 1.0, model_type: Optional[str] = "deepseek_chat", verbose: bool = False) -> str:
+        """
+        Generates a response from the DeepSeek API based on the provided message.
+
+        Args:
+            user_message (str): The message to send to the chat API.
+            response_temperature (float, optional): The creativity level of the response. Defaults to 1.0.
+            model_type (str, optional): The model class to be used for the chat session.
+            verbose (bool, optional): Whether to print the response content. Defaults to False.
+
+        Returns:
+            str: The concatenated response content received from the API.
+
+        Available models:
+        - deepseek_chat
+        - deepseek_code
+        """
+        request_payload = {
+            "message": user_message,
+            "stream": True,
+            "model_preference": None,
+            "model_class": model_type,
+            "temperature": response_temperature
+        }
+        api_response = self.api_session.post(f'{self.api_base_url}/completions', json=request_payload, stream=True)
+        api_response.raise_for_status()
+
+        combined_response = ""
+        for response_line in api_response.iter_lines(decode_unicode=True, chunk_size=1):
+            if response_line:
+                cleaned_line = re.sub("data:", "", response_line)
+                response_json = json.loads(cleaned_line)
+                response_content = response_json['choices'][0]['delta']['content']
+                if response_content and not re.match(r'^\s{5,}$', response_content):
+                    if verbose: print(response_content, end="", flush=True)
+                    combined_response += response_content
+
+        return combined_response
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "id": "chatcmpl-TaREJpBZsRVQFRFic1wIA7Q7XfnaD",
+            "object": "chat.completion",
+            "created": 1704623244,
+            "model": "gpt-3.5-turbo",
+            "usage": {
+                "prompt_tokens": 0,
+                "completion_tokens": 0,
+                "total_tokens": 0
+            },
+            "choices": [
+                {
+                    "message": {
+                        "role": "assistant",
+                        "content": "Hello! How can I assist you today?"
+                    },
+                    "finish_reason": "stop",
+                    "index": 0
+                }
+            ]
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        def for_stream():
+            response = self.generate(
+                user_message=conversation_prompt,
+                response_temperature=self.temperature,
+                model_type=self.model_type,
+                verbose=False,
+            )
+            # print(response)
+            self.last_response.update(dict(text=response))
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+            yield dict(text=response) if raw else dict(text=response)
+
+        def for_non_stream():
+            # let's make use of stream
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
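The heart of the new provider is `generate()`: the `completions` endpoint streams SSE-style `data:` lines, each carrying a JSON payload whose `choices[0].delta.content` fragment is appended to the reply, with runs of five or more whitespace characters filtered out. A self-contained sketch of that parsing loop over hypothetical sample lines (real lines come from the endpoint above):

```python
import json
import re

# Hypothetical stream records, shaped like those generate() iterates over.
sample_lines = [
    'data: {"choices": [{"delta": {"content": "Hello"}}]}',
    "",  # keep-alive blank line, skipped
    'data: {"choices": [{"delta": {"content": ", world!"}}]}',
]

combined_response = ""
for line in sample_lines:
    if not line:
        continue
    cleaned = re.sub("data:", "", line)  # strip the SSE prefix, as generate() does
    payload = json.loads(cleaned)
    content = payload["choices"][0]["delta"]["content"]
    if content and not re.match(r"^\s{5,}$", content):  # drop whitespace padding
        combined_response += content

print(combined_response)  # -> Hello, world!
```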
{webscout-3.1b0 → webscout-3.3}/webscout/Provider/__init__.py
@@ -29,6 +29,7 @@ from .Berlin4h import Berlin4h
 from .ChatGPTUK import ChatGPTUK
 from .Poe import POE
 from .BasedGPT import BasedGPT
+from .Deepseek import DeepSeek
 __all__ = [
     'ThinkAnyAI',
     'Xjai',
@@ -59,4 +60,5 @@ __all__ = [
     'ChatGPTUK',
     'POE',
     'BasedGPT',
+    'DeepSeek',
 ]
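Together with the one-line export added to `webscout/__init__.py` (shown as +1 in the file summary above), this makes the new provider importable from the package root as well as from the `Provider` subpackage. A quick sanity check, assuming webscout 3.3 is installed:

```python
from webscout import DeepSeek
from webscout.Provider import DeepSeek as ProviderDeepSeek

# Both import paths should resolve to the same class object.
assert DeepSeek is ProviderDeepSeek
```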
{webscout-3.1b0 → webscout-3.3}/webscout/webai.py
@@ -625,7 +625,23 @@ class Main(cmd.Cmd):
                 model=getOr(model, "reka-core"),
                 # quiet=quiet,
             )
+        elif provider == "deepseek":
+            from webscout import DeepSeek
 
+            self.bot = DeepSeek(
+                api_key=auth,
+                is_conversation=disable_conversation,
+                max_tokens=max_tokens,
+                timeout=timeout,
+                intro=intro,
+                filepath=filepath,
+                update_file=update_file,
+                proxies=proxies,
+                history_offset=history_offset,
+                act=awesome_prompt,
+                model=getOr(model, "deepseek_chat"),
+                # quiet=quiet,
+            )
         elif provider == "koboldai":
             from webscout import KOBOLDAI
 
{webscout-3.1b0 → webscout-3.3}/webscout.egg-info/PKG-INFO
@@ -1,10 +1,10 @@
@@ -140,9 +140,11 @@
@@ -1218,6 +1220,39 @@
@@ -1245,6 +1280,7 @@
@@ -1336,6 +1372,93 @@
These hunks are byte-for-byte identical to the {webscout-3.1b0 → webscout-3.3}/PKG-INFO hunks above.
webscout-3.1b0/LICENSE.md
DELETED
@@ -1,50 +0,0 @@
-****************************************
-****  HelpingAI Simplified Universal License  ****
-****************************************
-
-Version 1.0
-
-### Introduction
-
-This HelpingAI Simplified Universal License (HSUL) governs HelpingAI's content, including computer programs, scripts, datasets, documents, images, audio recordings, videos, and other digital assets. The HSUL provides simple, universal terms for accessing, modifying, and sharing resources while embracing ethical development practices.
-
-### Grant of Rights
-
-Under the HSUL, HelpingAI authorizes you to copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Content, subject to the terms and conditions detailed in this document.
-
-### Conditions
-
-To qualify for the rights granted in section 1, you must strictly adhere to the following conditions:
-
-2.1. **Redistributions of Source Code.**
-If you redistribute the Source Code, you must include the entire HSUL with your distribution. Furthermore, you must add prominent notifications in all affected files stating:
-
-> "This Work is released under the HelpingAI Simplified Universal License v1.0."
-
-2.2. **Binary Form Redistributions.**
-If you distribute Binaries generated from the Source Code, you must ensure the inclusion of the following statement in your distribution:
-
-> "This Work is based upon the HelpingAI Simplified Universally Licensed Work, under the HelpingAI Simplified Universal License v1.0."
-
-2.3. **Notification of Changes.**
-Clearly indicate any alterations you introduce to the Source Code or Documentation via prominent comments detailing the nature and scope of the change(s). Reference the date and originator of the modifications.
-
-2.4. **Branding Attribution.**
-Do not remove or alter any HelpingAI branding, logos, or notices included in the Content without explicit prior consent from HelpingAI.
-
-2.5. **Exclusion of Warranty.**
-The Content is delivered "AS IS," bereft of any implicit guarantee, including — though not constrained to — warranties pertaining to marketability, applicability for a particular purpose, and non-infringement.
-
-2.6. **Limitation of Liability.**
-To the maximum extent allowed by law, neither HelpingAI nor any contributor shall bear responsibility for any loss, personal injury, property damage, indirect, special, incidental, or consequential damages stemming from or relating to the Content or its employment.
-
-2.7. **Governing Law.**
-This HSUL shall be managed and construed according to the laws of the jurisdiction where HelpingAI primarily operates.
-
-### Definitions
-
-3.1. **"Source Code"** signifies the preferred form for editing the Content, typically represented by human-readable programming languages, scripts, or documentation formats.
-
-3.2. **"Binaries"** denote compiled forms of the Source Code, executables, libraries, or similar artifacts built from the Source Code.
-
-By leveraging this Content, you confirm your approval of the HSUL and pledge to honor its terms and conditions. If you disagree with the HSUL's rules, refrain from engaging with the Content.
|