webscout 3.0.tar.gz → 3.1.tar.gz

This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of webscout has been flagged as possibly problematic.

Files changed (73)
  1. {webscout-3.0 → webscout-3.1}/PKG-INFO +90 -1
  2. {webscout-3.0 → webscout-3.1}/README.md +89 -0
  3. {webscout-3.0 → webscout-3.1}/setup.py +1 -1
  4. {webscout-3.0 → webscout-3.1}/webscout/Local/__init__.py +1 -0
  5. {webscout-3.0 → webscout-3.1}/webscout/Local/_version.py +1 -1
  6. webscout-3.1/webscout/Local/rawdog.py +946 -0
  7. webscout-3.1/webscout/version.py +2 -0
  8. {webscout-3.0 → webscout-3.1}/webscout.egg-info/PKG-INFO +90 -1
  9. {webscout-3.0 → webscout-3.1}/webscout.egg-info/SOURCES.txt +1 -0
  10. webscout-3.0/webscout/version.py +0 -2
  11. {webscout-3.0 → webscout-3.1}/DeepWEBS/__init__.py +0 -0
  12. {webscout-3.0 → webscout-3.1}/DeepWEBS/documents/__init__.py +0 -0
  13. {webscout-3.0 → webscout-3.1}/DeepWEBS/documents/query_results_extractor.py +0 -0
  14. {webscout-3.0 → webscout-3.1}/DeepWEBS/documents/webpage_content_extractor.py +0 -0
  15. {webscout-3.0 → webscout-3.1}/DeepWEBS/networks/__init__.py +0 -0
  16. {webscout-3.0 → webscout-3.1}/DeepWEBS/networks/filepath_converter.py +0 -0
  17. {webscout-3.0 → webscout-3.1}/DeepWEBS/networks/google_searcher.py +0 -0
  18. {webscout-3.0 → webscout-3.1}/DeepWEBS/networks/network_configs.py +0 -0
  19. {webscout-3.0 → webscout-3.1}/DeepWEBS/networks/webpage_fetcher.py +0 -0
  20. {webscout-3.0 → webscout-3.1}/DeepWEBS/utilsdw/__init__.py +0 -0
  21. {webscout-3.0 → webscout-3.1}/DeepWEBS/utilsdw/enver.py +0 -0
  22. {webscout-3.0 → webscout-3.1}/DeepWEBS/utilsdw/logger.py +0 -0
  23. {webscout-3.0 → webscout-3.1}/LICENSE.md +0 -0
  24. {webscout-3.0 → webscout-3.1}/setup.cfg +0 -0
  25. {webscout-3.0 → webscout-3.1}/webscout/AIauto.py +0 -0
  26. {webscout-3.0 → webscout-3.1}/webscout/AIbase.py +0 -0
  27. {webscout-3.0 → webscout-3.1}/webscout/AIutel.py +0 -0
  28. {webscout-3.0 → webscout-3.1}/webscout/DWEBS.py +0 -0
  29. {webscout-3.0 → webscout-3.1}/webscout/LLM.py +0 -0
  30. {webscout-3.0 → webscout-3.1}/webscout/Local/formats.py +0 -0
  31. {webscout-3.0 → webscout-3.1}/webscout/Local/model.py +0 -0
  32. {webscout-3.0 → webscout-3.1}/webscout/Local/samplers.py +0 -0
  33. {webscout-3.0 → webscout-3.1}/webscout/Local/thread.py +0 -0
  34. {webscout-3.0 → webscout-3.1}/webscout/Local/utils.py +0 -0
  35. {webscout-3.0 → webscout-3.1}/webscout/Provider/BasedGPT.py +0 -0
  36. {webscout-3.0 → webscout-3.1}/webscout/Provider/Berlin4h.py +0 -0
  37. {webscout-3.0 → webscout-3.1}/webscout/Provider/Blackboxai.py +0 -0
  38. {webscout-3.0 → webscout-3.1}/webscout/Provider/ChatGPTUK.py +0 -0
  39. {webscout-3.0 → webscout-3.1}/webscout/Provider/Cohere.py +0 -0
  40. {webscout-3.0 → webscout-3.1}/webscout/Provider/Gemini.py +0 -0
  41. {webscout-3.0 → webscout-3.1}/webscout/Provider/Groq.py +0 -0
  42. {webscout-3.0 → webscout-3.1}/webscout/Provider/Koboldai.py +0 -0
  43. {webscout-3.0 → webscout-3.1}/webscout/Provider/Leo.py +0 -0
  44. {webscout-3.0 → webscout-3.1}/webscout/Provider/Llama2.py +0 -0
  45. {webscout-3.0 → webscout-3.1}/webscout/Provider/OpenGPT.py +0 -0
  46. {webscout-3.0 → webscout-3.1}/webscout/Provider/Openai.py +0 -0
  47. {webscout-3.0 → webscout-3.1}/webscout/Provider/Perplexity.py +0 -0
  48. {webscout-3.0 → webscout-3.1}/webscout/Provider/Phind.py +0 -0
  49. {webscout-3.0 → webscout-3.1}/webscout/Provider/Poe.py +0 -0
  50. {webscout-3.0 → webscout-3.1}/webscout/Provider/Reka.py +0 -0
  51. {webscout-3.0 → webscout-3.1}/webscout/Provider/ThinkAnyAI.py +0 -0
  52. {webscout-3.0 → webscout-3.1}/webscout/Provider/Xjai.py +0 -0
  53. {webscout-3.0 → webscout-3.1}/webscout/Provider/Yepchat.py +0 -0
  54. {webscout-3.0 → webscout-3.1}/webscout/Provider/Youchat.py +0 -0
  55. {webscout-3.0 → webscout-3.1}/webscout/Provider/__init__.py +0 -0
  56. {webscout-3.0 → webscout-3.1}/webscout/__init__.py +0 -0
  57. {webscout-3.0 → webscout-3.1}/webscout/__main__.py +0 -0
  58. {webscout-3.0 → webscout-3.1}/webscout/async_providers.py +0 -0
  59. {webscout-3.0 → webscout-3.1}/webscout/cli.py +0 -0
  60. {webscout-3.0 → webscout-3.1}/webscout/exceptions.py +0 -0
  61. {webscout-3.0 → webscout-3.1}/webscout/g4f.py +0 -0
  62. {webscout-3.0 → webscout-3.1}/webscout/models.py +0 -0
  63. {webscout-3.0 → webscout-3.1}/webscout/tempid.py +0 -0
  64. {webscout-3.0 → webscout-3.1}/webscout/transcriber.py +0 -0
  65. {webscout-3.0 → webscout-3.1}/webscout/utils.py +0 -0
  66. {webscout-3.0 → webscout-3.1}/webscout/voice.py +0 -0
  67. {webscout-3.0 → webscout-3.1}/webscout/webai.py +0 -0
  68. {webscout-3.0 → webscout-3.1}/webscout/webscout_search.py +0 -0
  69. {webscout-3.0 → webscout-3.1}/webscout/webscout_search_async.py +0 -0
  70. {webscout-3.0 → webscout-3.1}/webscout.egg-info/dependency_links.txt +0 -0
  71. {webscout-3.0 → webscout-3.1}/webscout.egg-info/entry_points.txt +0 -0
  72. {webscout-3.0 → webscout-3.1}/webscout.egg-info/requires.txt +0 -0
  73. {webscout-3.0 → webscout-3.1}/webscout.egg-info/top_level.txt +0 -0
{webscout-3.0 → webscout-3.1}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: webscout
- Version: 3.0
+ Version: 3.1
  Summary: Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs
  Author: OEvortex
  Author-email: helpingai5@gmail.com
@@ -143,6 +143,7 @@ Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can
  - [`LLM`](#llm)
  - [`Local-LLM` webscout can now run GGUF models](#local-llm-webscout-can-now-run-gguf-models)
  - [`Function-calling-local-llm`](#function-calling-local-llm)
+ - [`Local-rawdog`](#local-rawdog)
  - [`LLM` with internet](#llm-with-internet)
  - [LLM with deepwebs](#llm-with-deepwebs)
  - [`Webai` - terminal gpt and a open interpeter](#webai---terminal-gpt-and-a-open-interpeter)
@@ -1245,6 +1246,7 @@ while True:
      print("AI: ", response)
  ```
  ### `Local-LLM` webscout can now run GGUF models
+ Local LLM's some functions are taken from easy-llama
  ```python
  from webscout.Local.utils import download_model
  from webscout.Local.model import Model
@@ -1336,6 +1338,93 @@ while True:
      response = thread.send(user_input)
      print("Bot: ", response)
  ```
+ ### `Local-rawdog`
+ ```python
+ import webscout.Local as ws
+ from webscout.Local.rawdog import RawDog
+ from webscout.Local.samplers import DefaultSampling
+ from webscout.Local.formats import chatml, AdvancedFormat
+ from webscout.Local.utils import download_model
+ import datetime
+ import sys
+ import os
+
+ repo_id = "YorkieOH10/granite-8b-code-instruct-Q8_0-GGUF"
+ filename = "granite-8b-code-instruct.Q8_0.gguf"
+ model_path = download_model(repo_id, filename, token='')
+
+ # Load the model using the downloaded path
+ model = ws.Model(model_path, n_gpu_layers=10)
+
+ rawdog = RawDog()
+
+ # Create an AdvancedFormat and modify the system content
+ # Use a lambda to generate the prompt dynamically:
+ chat_format = AdvancedFormat(chatml)
+ # **Pre-format the intro_prompt string:**
+ system_content = f"""
+ You are a command-line coding assistant called Rawdog that generates and auto-executes Python scripts.
+
+ A typical interaction goes like this:
+ 1. The user gives you a natural language PROMPT.
+ 2. You:
+     i. Determine what needs to be done
+     ii. Write a short Python SCRIPT to do it
+     iii. Communicate back to the user by printing to the console in that SCRIPT
+ 3. The compiler extracts the script and then runs it using exec(). If there will be an exception raised,
+    it will be send back to you starting with "PREVIOUS SCRIPT EXCEPTION:".
+ 4. In case of exception, regenerate error free script.
+
+ If you need to review script outputs before completing the task, you can print the word "CONTINUE" at the end of your SCRIPT.
+ This can be useful for summarizing documents or technical readouts, reading instructions before
+ deciding what to do, or other tasks that require multi-step reasoning.
+ A typical 'CONTINUE' interaction looks like this:
+ 1. The user gives you a natural language PROMPT.
+ 2. You:
+     i. Determine what needs to be done
+     ii. Determine that you need to see the output of some subprocess call to complete the task
+     iii. Write a short Python SCRIPT to print that and then print the word "CONTINUE"
+ 3. The compiler
+     i. Checks and runs your SCRIPT
+     ii. Captures the output and appends it to the conversation as "LAST SCRIPT OUTPUT:"
+     iii. Finds the word "CONTINUE" and sends control back to you
+ 4. You again:
+     i. Look at the original PROMPT + the "LAST SCRIPT OUTPUT:" to determine what needs to be done
+     ii. Write a short Python SCRIPT to do it
+     iii. Communicate back to the user by printing to the console in that SCRIPT
+ 5. The compiler...
+
+ Please follow these conventions carefully:
+ - Decline any tasks that seem dangerous, irreversible, or that you don't understand.
+ - Always review the full conversation prior to answering and maintain continuity.
+ - If asked for information, just print the information clearly and concisely.
+ - If asked to do something, print a concise summary of what you've done as confirmation.
+ - If asked a question, respond in a friendly, conversational way. Use programmatically-generated and natural language responses as appropriate.
+ - If you need clarification, return a SCRIPT that prints your question. In the next interaction, continue based on the user's response.
+ - Assume the user would like something concise. For example rather than printing a massive table, filter or summarize it to what's likely of interest.
+ - Actively clean up any temporary processes or files you use.
+ - When looking through files, use git as available to skip files, and skip hidden files (.env, .git, etc) by default.
+ - You can plot anything with matplotlib.
+ - ALWAYS Return your SCRIPT inside of a single pair of ``` delimiters. Only the console output of the first such SCRIPT is visible to the user, so make sure that it's complete and don't bother returning anything else.
+ """
+ chat_format.override('system_content', lambda: system_content)
+
+ thread = ws.Thread(model, format=chat_format, sampler=DefaultSampling)
+
+ while True:
+     prompt = input(">: ")
+     if prompt.lower() == "q":
+         break
+
+     response = thread.send(prompt)
+
+     # Process the response using RawDog
+     script_output = rawdog.main(response)
+
+     if script_output:
+         print(script_output)
+
+ ```
  ### `LLM` with internet
  ```python
  from __future__ import annotations
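The `Local-rawdog` example above hands the model's reply to `rawdog.main(...)`, implemented in the new 946-line webscout/Local/rawdog.py that this diff adds but does not display. The system prompt spells out the protocol that module has to implement: extract the fenced SCRIPT from the reply, run it with exec(), return any traceback labeled "PREVIOUS SCRIPT EXCEPTION:", and hand control back to the model when the output prints "CONTINUE". A minimal sketch of that loop follows; the regex, feedback labels, and `run_once` helper are illustrative assumptions, not the actual rawdog.py API.

```python
import io
import re
import traceback
from contextlib import redirect_stdout
from typing import Optional

# Hypothetical extractor: first fenced block, with or without a language tag.
# FENCE is built programmatically to avoid a literal triple-backtick here.
FENCE = "`" * 3
SCRIPT_RE = re.compile(FENCE + r"(?:python)?\s*\n(.*?)" + FENCE, re.DOTALL)

def run_once(llm_response: str) -> Optional[str]:
    """Extract the first fenced SCRIPT from the model's reply and exec() it.

    Returns text to feed back into the conversation, or None if the reply
    contained no script. This mirrors the protocol in the system prompt;
    the real rawdog.py may differ in every detail.
    """
    match = SCRIPT_RE.search(llm_response)
    if match is None:
        return None
    script = match.group(1)
    buffer = io.StringIO()
    try:
        with redirect_stdout(buffer):
            # NOTE: exec() runs untrusted, model-generated code with full
            # interpreter privileges -- exactly what the prompt describes.
            exec(script, {"__name__": "__main__"})
    except Exception:
        # Sent back so the model can regenerate an error-free script.
        return "PREVIOUS SCRIPT EXCEPTION:\n" + traceback.format_exc()
    output = buffer.getvalue()
    if "CONTINUE" in output:
        # Multi-step task: the model asked to see this output before finishing.
        return "LAST SCRIPT OUTPUT:\n" + output
    return output
```

In the README loop this would sit where `rawdog.main(response)` is called: whatever `run_once` returns is either printed for the user or appended to the thread so the model can take another turn.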
{webscout-3.0 → webscout-3.1}/README.md

@@ -78,6 +78,7 @@ Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can
  - [`LLM`](#llm)
  - [`Local-LLM` webscout can now run GGUF models](#local-llm-webscout-can-now-run-gguf-models)
  - [`Function-calling-local-llm`](#function-calling-local-llm)
+ - [`Local-rawdog`](#local-rawdog)
  - [`LLM` with internet](#llm-with-internet)
  - [LLM with deepwebs](#llm-with-deepwebs)
  - [`Webai` - terminal gpt and a open interpeter](#webai---terminal-gpt-and-a-open-interpeter)
@@ -1180,6 +1181,7 @@ while True:
      print("AI: ", response)
  ```
  ### `Local-LLM` webscout can now run GGUF models
+ Local LLM's some functions are taken from easy-llama
  ```python
  from webscout.Local.utils import download_model
  from webscout.Local.model import Model
@@ -1271,6 +1273,93 @@ while True:
      response = thread.send(user_input)
      print("Bot: ", response)
  ```
+ ### `Local-rawdog`
+ ```python
+ import webscout.Local as ws
+ from webscout.Local.rawdog import RawDog
+ from webscout.Local.samplers import DefaultSampling
+ from webscout.Local.formats import chatml, AdvancedFormat
+ from webscout.Local.utils import download_model
+ import datetime
+ import sys
+ import os
+
+ repo_id = "YorkieOH10/granite-8b-code-instruct-Q8_0-GGUF"
+ filename = "granite-8b-code-instruct.Q8_0.gguf"
+ model_path = download_model(repo_id, filename, token='')
+
+ # Load the model using the downloaded path
+ model = ws.Model(model_path, n_gpu_layers=10)
+
+ rawdog = RawDog()
+
+ # Create an AdvancedFormat and modify the system content
+ # Use a lambda to generate the prompt dynamically:
+ chat_format = AdvancedFormat(chatml)
+ # **Pre-format the intro_prompt string:**
+ system_content = f"""
+ You are a command-line coding assistant called Rawdog that generates and auto-executes Python scripts.
+
+ A typical interaction goes like this:
+ 1. The user gives you a natural language PROMPT.
+ 2. You:
+     i. Determine what needs to be done
+     ii. Write a short Python SCRIPT to do it
+     iii. Communicate back to the user by printing to the console in that SCRIPT
+ 3. The compiler extracts the script and then runs it using exec(). If there will be an exception raised,
+    it will be send back to you starting with "PREVIOUS SCRIPT EXCEPTION:".
+ 4. In case of exception, regenerate error free script.
+
+ If you need to review script outputs before completing the task, you can print the word "CONTINUE" at the end of your SCRIPT.
+ This can be useful for summarizing documents or technical readouts, reading instructions before
+ deciding what to do, or other tasks that require multi-step reasoning.
+ A typical 'CONTINUE' interaction looks like this:
+ 1. The user gives you a natural language PROMPT.
+ 2. You:
+     i. Determine what needs to be done
+     ii. Determine that you need to see the output of some subprocess call to complete the task
+     iii. Write a short Python SCRIPT to print that and then print the word "CONTINUE"
+ 3. The compiler
+     i. Checks and runs your SCRIPT
+     ii. Captures the output and appends it to the conversation as "LAST SCRIPT OUTPUT:"
+     iii. Finds the word "CONTINUE" and sends control back to you
+ 4. You again:
+     i. Look at the original PROMPT + the "LAST SCRIPT OUTPUT:" to determine what needs to be done
+     ii. Write a short Python SCRIPT to do it
+     iii. Communicate back to the user by printing to the console in that SCRIPT
+ 5. The compiler...
+
+ Please follow these conventions carefully:
+ - Decline any tasks that seem dangerous, irreversible, or that you don't understand.
+ - Always review the full conversation prior to answering and maintain continuity.
+ - If asked for information, just print the information clearly and concisely.
+ - If asked to do something, print a concise summary of what you've done as confirmation.
+ - If asked a question, respond in a friendly, conversational way. Use programmatically-generated and natural language responses as appropriate.
+ - If you need clarification, return a SCRIPT that prints your question. In the next interaction, continue based on the user's response.
+ - Assume the user would like something concise. For example rather than printing a massive table, filter or summarize it to what's likely of interest.
+ - Actively clean up any temporary processes or files you use.
+ - When looking through files, use git as available to skip files, and skip hidden files (.env, .git, etc) by default.
+ - You can plot anything with matplotlib.
+ - ALWAYS Return your SCRIPT inside of a single pair of ``` delimiters. Only the console output of the first such SCRIPT is visible to the user, so make sure that it's complete and don't bother returning anything else.
+ """
+ chat_format.override('system_content', lambda: system_content)
+
+ thread = ws.Thread(model, format=chat_format, sampler=DefaultSampling)
+
+ while True:
+     prompt = input(">: ")
+     if prompt.lower() == "q":
+         break
+
+     response = thread.send(prompt)
+
+     # Process the response using RawDog
+     script_output = rawdog.main(response)
+
+     if script_output:
+         print(script_output)
+
+ ```
  ### `LLM` with internet
  ```python
  from __future__ import annotations
{webscout-3.0 → webscout-3.1}/setup.py

@@ -5,7 +5,7 @@ with open("README.md", encoding="utf-8") as f:

  setup(
      name="webscout",
-     version="3.0",
+     version="3.1",
      description="Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs",
      long_description=README,
      long_description_content_type="text/markdown",
{webscout-3.0 → webscout-3.1}/webscout/Local/__init__.py

@@ -8,3 +8,4 @@ from . import utils

  from .model import Model
  from .thread import Thread
+ from .rawdog import *
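This one-line follow-up in webscout/Local/__init__.py re-exports rawdog's public names from the subpackage root, next to Model and Thread. A usage sketch, assuming `RawDog` is among the names rawdog.py exposes (the module body is not shown in this diff):

```python
import webscout.Local as ws

# Before 3.1 the class had to be imported from the submodule:
#   from webscout.Local.rawdog import RawDog
# After this change the shorter path should also work (assumption:
# rawdog.py leaves RawDog public or lists it in __all__).
rawdog = ws.RawDog()
```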
{webscout-3.0 → webscout-3.1}/webscout/Local/_version.py

@@ -1,3 +1,3 @@
  from llama_cpp import __version__ as __llama_cpp_version__

- __version__ = '2.9'
+ __version__ = '3.1'