webscout 3.6__py3-none-any.whl → 3.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

@@ -0,0 +1,2 @@
1
+ from .gguf import *
2
+ from .autollama import *
@@ -0,0 +1,47 @@
1
+ import subprocess
2
+ import argparse
3
+ import os
4
+
5
def autollama(model_path, gguf_file):
    """Manage models with Ollama via the bundled autollama.sh script.

    Args:
        model_path (str): The path to the Hugging Face model.
        gguf_file (str): The name of the GGUF file.
    """
    # Locate autollama.sh next to this module so the caller's CWD is irrelevant.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    script_path = os.path.join(script_dir, "autollama.sh")

    command = ["bash", script_path, "-m", model_path, "-g", gguf_file]

    # Merge stderr into stdout. Draining stdout to EOF and only then reading
    # stderr (the previous approach) can deadlock: if the child fills the
    # stderr pipe buffer while we are blocked on stdout, both sides stall.
    process = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True,
    )

    # Stream the combined output in real time.
    for line in process.stdout:
        print(line, end='')

    process.wait()
33
+
34
def main():
    """CLI entry point: forward -m/-g arguments to autollama()."""
    parser = argparse.ArgumentParser(description='Run autollama.sh to manage models with Ollama')
    parser.add_argument('-m', '--model_path', required=True, help='Set the path to the Hugging Face model')
    parser.add_argument('-g', '--gguf_file', required=True, help='Set the GGUF file name')
    args = parser.parse_args()

    try:
        autollama(args.model_path, args.gguf_file)
    except Exception as e:
        # Top-level boundary: report the failure and exit non-zero.
        print(f"Error: {e}")
        # raise SystemExit rather than exit(): the exit() builtin is added by
        # the site module for interactive use and is not guaranteed to exist.
        raise SystemExit(1)

if __name__ == "__main__":
    main()
webscout/Extra/gguf.py ADDED
@@ -0,0 +1,80 @@
1
+ import subprocess
2
+ import argparse
3
+ import os
4
+
5
def convert(model_id, username=None, token=None, quantization_methods="q4_k_m,q5_k_m"):
    """Convert and quantize a Hugging Face model to GGUF format.

    Args:
        model_id (str): The Hugging Face model ID (e.g., 'google/flan-t5-xl').
        username (str, optional): Your Hugging Face username. Required for uploads.
        token (str, optional): Your Hugging Face API token. Required for uploads.
        quantization_methods (str, optional): Comma-separated quantization methods.
            Whitespace around commas is tolerated. Defaults to "q4_k_m,q5_k_m".

    Raises:
        ValueError: If an invalid quantization method is provided.
    """
    # List of quantization methods recognised by gguf.sh.
    valid_methods = [
        "q2_k", "q3_k_l", "q3_k_m", "q3_k_s",
        "q4_0", "q4_1", "q4_k_m", "q4_k_s",
        "q5_0", "q5_1", "q5_k_m", "q5_k_s",
        "q6_k", "q8_0"
    ]

    # Validate the selected methods. Strip each entry so inputs such as
    # "q4_k_m, q5_k_m" (a natural way to type the list) are accepted.
    selected_methods_list = [m.strip() for m in quantization_methods.split(',')]
    for method in selected_methods_list:
        if method not in valid_methods:
            raise ValueError(f"Invalid method: {method}. Please select from the available methods: {', '.join(valid_methods)}")

    # Locate gguf.sh next to this module so the caller's CWD is irrelevant.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    script_path = os.path.join(script_dir, "gguf.sh")

    command = ["bash", script_path, "-m", model_id]

    if username:
        command.extend(["-u", username])

    if token:
        command.extend(["-t", token])

    if quantization_methods:
        # Pass the normalised (stripped) list to the script.
        command.extend(["-q", ",".join(selected_methods_list)])

    # Merge stderr into stdout. Draining stdout to EOF and only then reading
    # stderr (the previous approach) can deadlock: if the child fills the
    # stderr pipe buffer while we are blocked on stdout, both sides stall.
    process = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True,
    )

    # Stream the combined output in real time.
    for line in process.stdout:
        print(line, end='')

    process.wait()
62
+
63
def main():
    """CLI entry point: forward -m/-u/-t/-q arguments to convert()."""
    parser = argparse.ArgumentParser(description='Convert and quantize model using gguf.sh')
    parser.add_argument('-m', '--model_id', required=True, help='Set the HF model ID (e.g., "google/flan-t5-xl")')
    parser.add_argument('-u', '--username', help='Set your Hugging Face username (required for uploads)')
    parser.add_argument('-t', '--token', help='Set your Hugging Face API token (required for uploads)')
    parser.add_argument('-q', '--quantization_methods', default="q4_k_m,q5_k_m",
                        help='Comma-separated quantization methods (default: q4_k_m,q5_k_m). Valid methods: q2_k, q3_k_l, q3_k_m, q3_k_s, q4_0, q4_1, q4_k_m, q4_k_s, q5_0, q5_1, q5_k_m, q5_k_s, q6_k, q8_0')

    args = parser.parse_args()

    try:
        convert(args.model_id, args.username, args.token, args.quantization_methods)
    except ValueError as e:
        # Invalid quantization method: report it and exit non-zero.
        print(e)
        # raise SystemExit rather than exit(): the exit() builtin is added by
        # the site module for interactive use and is not guaranteed to exist.
        raise SystemExit(1)

if __name__ == "__main__":
    main()
@@ -1,3 +1,3 @@
1
1
  from llama_cpp import __version__ as __llama_cpp_version__
2
2
 
3
- __version__ = '3.4'
3
+ __version__ = '3.7'
webscout/__init__.py CHANGED
@@ -11,7 +11,7 @@ from .LLM import LLM
11
11
  import g4f
12
12
  # Import provider classes for direct access
13
13
  from .Provider import *
14
-
14
+ from .Extra import *
15
15
  __repo__ = "https://github.com/OE-LUCIFER/Webscout"
16
16
 
17
17
  webai = [
webscout/version.py CHANGED
@@ -1,2 +1,2 @@
1
- __version__ = "3.4"
1
+ __version__ = "3.7"
2
2
 
@@ -1,7 +1,7 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: webscout
3
- Version: 3.6
4
- Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs
3
+ Version: 3.7
4
+ Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
5
5
  Author: OEvortex
6
6
  Author-email: helpingai5@gmail.com
7
7
  License: HelpingAI
@@ -62,7 +62,7 @@ Provides-Extra: local
62
62
  Requires-Dist: llama-cpp-python ; extra == 'local'
63
63
  Requires-Dist: colorama ; extra == 'local'
64
64
  Requires-Dist: numpy ; extra == 'local'
65
- Requires-Dist: huggingface-hub ; extra == 'local'
65
+ Requires-Dist: huggingface-hub[cli] ; extra == 'local'
66
66
 
67
67
  <div align="center">
68
68
  <!-- Replace `#` with your actual links -->
@@ -1447,13 +1447,19 @@ while True:
1447
1447
  # Print the response
1448
1448
  print("AI: ", response)
1449
1449
  ```
1450
- ### `Local-LLM` webscout can now run GGUF models
1451
- Local LLM's some functions are taken from easy-llama
1450
+
1451
+ ## Local-LLM
1452
+
1453
+ Webscout can now run GGUF models locally. You can download and run your favorite models with minimal configuration.
1454
+
1455
+ **Example:**
1456
+
1452
1457
  ```python
1453
1458
  from webscout.Local.utils import download_model
1454
1459
  from webscout.Local.model import Model
1455
1460
  from webscout.Local.thread import Thread
1456
1461
  from webscout.Local import formats
1462
+
1457
1463
  # 1. Download the model
1458
1464
  repo_id = "microsoft/Phi-3-mini-4k-instruct-gguf" # Replace with the desired Hugging Face repo
1459
1465
  filename = "Phi-3-mini-4k-instruct-q4.gguf" # Replace with the correct filename
@@ -1469,7 +1475,11 @@ thread = Thread(model, formats.phi3)
1469
1475
  thread.interact()
1470
1476
  ```
1471
1477
 
1472
- ### `Local-rawdog`
1478
+ ## Local-rawdog
1479
+ Webscout's local raw-dog feature allows you to run Python scripts within your terminal prompt.
1480
+
1481
+ **Example:**
1482
+
1473
1483
  ```python
1474
1484
  import webscout.Local as ws
1475
1485
  from webscout.Local.rawdog import RawDog
@@ -1556,6 +1566,63 @@ while True:
1556
1566
  print(script_output)
1557
1567
 
1558
1568
  ```
1569
+
1570
+ ## GGUF
1571
+
1572
+ Webscout provides tools to convert and quantize Hugging Face models into the GGUF format for use with offline LLMs.
1573
+
1574
+ **Example:**
1575
+
1576
+ ```python
1577
+ from webscout import gguf
1578
+ """
1579
+ Valid quantization methods:
1580
+ "q2_k", "q3_k_l", "q3_k_m", "q3_k_s",
1581
+ "q4_0", "q4_1", "q4_k_m", "q4_k_s",
1582
+ "q5_0", "q5_1", "q5_k_m", "q5_k_s",
1583
+ "q6_k", "q8_0"
1584
+ """
1585
+ gguf.convert(
1586
+ model_id="OEvortex/HelpingAI-Lite-1.5T", # Replace with your model ID
1587
+ username="Abhaykoul", # Replace with your Hugging Face username
1588
+ token="hf_token_write", # Replace with your Hugging Face token
1589
+ quantization_methods="q4_k_m" # Optional, adjust quantization methods
1590
+ )
1591
+ ```
1592
+
1593
+ ## Autollama
1594
+
1595
+ Webscout's `autollama` utility downloads a model from Hugging Face and then automatically makes it Ollama-ready.
1596
+
1597
+ **Example:**
1598
+
1599
+ ```python
1600
+ from webscout import autollama
1601
+
1602
+ autollama.autollama(
1603
+ model_path="OEvortex/HelpingAI-Lite-1.5T", # Hugging Face model ID
1604
+ gguf_file="HelpingAI-Lite-1.5T.q4_k_m.gguf" # GGUF file ID
1605
+ )
1606
+ ```
1607
+
1608
+ **Command Line Usage:**
1609
+
1610
+ * **GGUF Conversion:**
1611
+ ```bash
1612
+ python -m webscout.Extra.gguf -m "OEvortex/HelpingAI-Lite-1.5T" -u "your_username" -t "your_hf_token" -q "q4_k_m,q5_k_m"
1613
+ ```
1614
+
1615
+ * **Autollama:**
1616
+ ```bash
1617
+ python -m webscout.Extra.autollama -m "OEvortex/HelpingAI-Lite-1.5T" -g "HelpingAI-Lite-1.5T.q4_k_m.gguf"
1618
+ ```
1619
+
1620
+ **Note:**
1621
+
1622
+ * Replace `"your_username"` and `"your_hf_token"` with your actual Hugging Face credentials.
1623
+ * The `model_path` in `autollama` is the Hugging Face model ID, and `gguf_file` is the GGUF file ID.
1624
+
1625
+
1559
1626
  ### `LLM` with internet
1560
1627
  ```python
1561
1628
  from __future__ import annotations
@@ -3,7 +3,7 @@ webscout/AIbase.py,sha256=GoHbN8r0gq2saYRZv6LA-Fr9Jlcjv80STKFXUq2ZeGU,4710
3
3
  webscout/AIutel.py,sha256=MMfUvTQXYDtaFXsXtwKgv9V_qMK6WgOxdx7Wagdm2Lw,33542
4
4
  webscout/DWEBS.py,sha256=QLuT1IKu0lnwdl7W6c-ctBAO7Jj0Zk3PYm6-13BC7rU,25740
5
5
  webscout/LLM.py,sha256=LbGCZdJf8A5dwfoGS4tyy39tAh5BDdhMZP0ScKaaQfU,4184
6
- webscout/__init__.py,sha256=ugx2Z3KX710527ri6AWPRwme9HjuiQezAnwyltqzr4c,2038
6
+ webscout/__init__.py,sha256=WolS-VvktY4VezczjfbZuZR5iPCtCzPUV_WKqDFVIoA,2058
7
7
  webscout/__main__.py,sha256=ZtTRgsRjUi2JOvYFLF1ZCh55Sdoz94I-BS-TlJC7WDU,126
8
8
  webscout/async_providers.py,sha256=holBv5SxanxVXc_92CBBaXHlB2IakB_fHnhyZaFjYF8,684
9
9
  webscout/cli.py,sha256=enw_dPTCG3sNC1TXt96XccnpRmF4Etr99nh-RbGYags,18784
@@ -13,14 +13,17 @@ webscout/models.py,sha256=5iQIdtedT18YuTZ3npoG7kLMwcrKwhQ7928dl_7qZW0,692
13
13
  webscout/tempid.py,sha256=5oc3UbXhPGKxrMRTfRABT-V-dNzH_hOKWtLYM6iCWd4,5896
14
14
  webscout/transcriber.py,sha256=EddvTSq7dPJ42V3pQVnGuEiYQ7WjJ9uyeR9kMSxN7uY,20622
15
15
  webscout/utils.py,sha256=CxeXvp0rWIulUrEaPZMaNfg_tSuQLRSV8uuHA2chyKE,2603
16
- webscout/version.py,sha256=pTj22SSXb7rieyMXdGyEFmljJmZMa6FL_DaETjfeLwA,23
16
+ webscout/version.py,sha256=IuTIikIXiglYKmugXFivfp0USNzx9FUxd_CVlae9bgk,23
17
17
  webscout/voice.py,sha256=0QjXTHAQmCK07IDZXRc7JXem47cnPJH7u3X0sVP1-UQ,967
18
18
  webscout/webai.py,sha256=qkvhYdyF5wNdmW4rNdH3RbfQxabEWlGvCyAk2SbH04k,86602
19
19
  webscout/webscout_search.py,sha256=lFAot1-Qil_YfXieeLakDVDEX8Ckcima4ueXdOYwiMc,42804
20
20
  webscout/webscout_search_async.py,sha256=dooKGwLm0cwTml55Vy6NHPPY-nymEqX2h8laX94Zg5A,14537
21
21
  webscout/websx_search.py,sha256=n-qVwiHozJEF-GFRPcAfh4k1d_tscTmDe1dNL-1ngcU,12094
22
+ webscout/Extra/__init__.py,sha256=vlW4RoSl5v3d7j_Yq1XEMydrG9JM-On_afgK-HtRZsk,45
23
+ webscout/Extra/autollama.py,sha256=5OPVRETbRJomTerddMJtznE-GFAZvDx5BPRM9EQB9dU,1476
24
+ webscout/Extra/gguf.py,sha256=HrRF0hW3HZHwtu4OCFumlRTgMBqFpqK0JKyRTRyPWrs,3122
22
25
  webscout/Local/__init__.py,sha256=RN6klpbabPGNX2YzPm_hdeUcQvieUwvJt22uAO2RKSM,238
23
- webscout/Local/_version.py,sha256=hC_EHWR519ZOsyRw9i6gXEfU5IAIR_B9d3THLVmkWXw,83
26
+ webscout/Local/_version.py,sha256=3sFn1tDa2mT9Pb1-OGW4K3_zbiJ0mhPRqB2rnLfp28Q,83
24
27
  webscout/Local/formats.py,sha256=BiZZSoN3e8S6-S-ykBL9ogSUs0vK11GaZ3ghc9U8GRk,18994
25
28
  webscout/Local/model.py,sha256=T_bzNNrxEyOyLyhp6fKwiuVBBkXC2a37LzJVCxFIxOU,30710
26
29
  webscout/Local/rawdog.py,sha256=ojY_O8Vb1KvR34OwWdfLgllgaAK_7HMf64ElMATvCXs,36689
@@ -51,9 +54,9 @@ webscout/Provider/Xjai.py,sha256=BIlk2ouz9Kh_0Gg9hPvTqhI7XtcmWdg5vHSX_4uGrIs,903
51
54
  webscout/Provider/Yepchat.py,sha256=2Eit-A7w1ph1GQKNQuur_yaDzI64r0yBGxCIjDefJxQ,19875
52
55
  webscout/Provider/Youchat.py,sha256=UVGBuGSjv4uRibn1xflmCjYcfrRTKnDvX3adhag6T98,7976
53
56
  webscout/Provider/__init__.py,sha256=RaMdtYv7eQJ2vB8jXUHrkfNbx2DgRjbwc6DI40cOH1A,1809
54
- webscout-3.6.dist-info/LICENSE.md,sha256=9P0imsudI7MEvZe2pOcg8rKBn6E5FGHQ-riYozZI-Bk,2942
55
- webscout-3.6.dist-info/METADATA,sha256=YhKU0lcCGrDmUA-L1wy7ETzzmkWmbY5MvEfZg_6GlcU,67341
56
- webscout-3.6.dist-info/WHEEL,sha256=cpQTJ5IWu9CdaPViMhC9YzF8gZuS5-vlfoFihTBC86A,91
57
- webscout-3.6.dist-info/entry_points.txt,sha256=Hh4YIIjvkqB9SVxZ2ri4DZUkgEu_WF_5_r_nZDIvfG8,73
58
- webscout-3.6.dist-info/top_level.txt,sha256=nYIw7OKBQDr_Z33IzZUKidRD3zQEo8jOJYkMVMeN334,9
59
- webscout-3.6.dist-info/RECORD,,
57
+ webscout-3.7.dist-info/LICENSE.md,sha256=9P0imsudI7MEvZe2pOcg8rKBn6E5FGHQ-riYozZI-Bk,2942
58
+ webscout-3.7.dist-info/METADATA,sha256=FocHEEpfWeT2aX3bUXJ8mHw1cWugF3tijDNqUnehB-o,69115
59
+ webscout-3.7.dist-info/WHEEL,sha256=cpQTJ5IWu9CdaPViMhC9YzF8gZuS5-vlfoFihTBC86A,91
60
+ webscout-3.7.dist-info/entry_points.txt,sha256=Hh4YIIjvkqB9SVxZ2ri4DZUkgEu_WF_5_r_nZDIvfG8,73
61
+ webscout-3.7.dist-info/top_level.txt,sha256=nYIw7OKBQDr_Z33IzZUKidRD3zQEo8jOJYkMVMeN334,9
62
+ webscout-3.7.dist-info/RECORD,,
File without changes