webscout 3.6.tar.gz → 3.8.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (68)
  1. {webscout-3.6/webscout.egg-info → webscout-3.8}/PKG-INFO +73 -6
  2. {webscout-3.6 → webscout-3.8}/README.md +70 -3
  3. {webscout-3.6 → webscout-3.8}/setup.py +3 -3
  4. webscout-3.8/webscout/Extra/__init__.py +2 -0
  5. webscout-3.8/webscout/Extra/autollama.py +198 -0
  6. webscout-3.8/webscout/Extra/gguf.py +240 -0
  7. {webscout-3.6 → webscout-3.8}/webscout/Local/_version.py +1 -1
  8. {webscout-3.6 → webscout-3.8}/webscout/__init__.py +2 -1
  9. webscout-3.8/webscout/version.py +2 -0
  10. {webscout-3.6 → webscout-3.8/webscout.egg-info}/PKG-INFO +73 -6
  11. {webscout-3.6 → webscout-3.8}/webscout.egg-info/SOURCES.txt +3 -0
  12. {webscout-3.6 → webscout-3.8}/webscout.egg-info/requires.txt +1 -1
  13. webscout-3.6/webscout/version.py +0 -2
  14. {webscout-3.6 → webscout-3.8}/LICENSE.md +0 -0
  15. {webscout-3.6 → webscout-3.8}/setup.cfg +0 -0
  16. {webscout-3.6 → webscout-3.8}/webscout/AIauto.py +0 -0
  17. {webscout-3.6 → webscout-3.8}/webscout/AIbase.py +0 -0
  18. {webscout-3.6 → webscout-3.8}/webscout/AIutel.py +0 -0
  19. {webscout-3.6 → webscout-3.8}/webscout/DWEBS.py +0 -0
  20. {webscout-3.6 → webscout-3.8}/webscout/LLM.py +0 -0
  21. {webscout-3.6 → webscout-3.8}/webscout/Local/__init__.py +0 -0
  22. {webscout-3.6 → webscout-3.8}/webscout/Local/formats.py +0 -0
  23. {webscout-3.6 → webscout-3.8}/webscout/Local/model.py +0 -0
  24. {webscout-3.6 → webscout-3.8}/webscout/Local/rawdog.py +0 -0
  25. {webscout-3.6 → webscout-3.8}/webscout/Local/samplers.py +0 -0
  26. {webscout-3.6 → webscout-3.8}/webscout/Local/thread.py +0 -0
  27. {webscout-3.6 → webscout-3.8}/webscout/Local/utils.py +0 -0
  28. {webscout-3.6 → webscout-3.8}/webscout/Provider/BasedGPT.py +0 -0
  29. {webscout-3.6 → webscout-3.8}/webscout/Provider/Berlin4h.py +0 -0
  30. {webscout-3.6 → webscout-3.8}/webscout/Provider/Blackboxai.py +0 -0
  31. {webscout-3.6 → webscout-3.8}/webscout/Provider/ChatGPTUK.py +0 -0
  32. {webscout-3.6 → webscout-3.8}/webscout/Provider/Cohere.py +0 -0
  33. {webscout-3.6 → webscout-3.8}/webscout/Provider/Deepinfra.py +0 -0
  34. {webscout-3.6 → webscout-3.8}/webscout/Provider/Deepseek.py +0 -0
  35. {webscout-3.6 → webscout-3.8}/webscout/Provider/Gemini.py +0 -0
  36. {webscout-3.6 → webscout-3.8}/webscout/Provider/Groq.py +0 -0
  37. {webscout-3.6 → webscout-3.8}/webscout/Provider/Koboldai.py +0 -0
  38. {webscout-3.6 → webscout-3.8}/webscout/Provider/Leo.py +0 -0
  39. {webscout-3.6 → webscout-3.8}/webscout/Provider/Llama2.py +0 -0
  40. {webscout-3.6 → webscout-3.8}/webscout/Provider/OpenGPT.py +0 -0
  41. {webscout-3.6 → webscout-3.8}/webscout/Provider/Openai.py +0 -0
  42. {webscout-3.6 → webscout-3.8}/webscout/Provider/Perplexity.py +0 -0
  43. {webscout-3.6 → webscout-3.8}/webscout/Provider/Phind.py +0 -0
  44. {webscout-3.6 → webscout-3.8}/webscout/Provider/Poe.py +0 -0
  45. {webscout-3.6 → webscout-3.8}/webscout/Provider/Reka.py +0 -0
  46. {webscout-3.6 → webscout-3.8}/webscout/Provider/ThinkAnyAI.py +0 -0
  47. {webscout-3.6 → webscout-3.8}/webscout/Provider/VTLchat.py +0 -0
  48. {webscout-3.6 → webscout-3.8}/webscout/Provider/Xjai.py +0 -0
  49. {webscout-3.6 → webscout-3.8}/webscout/Provider/Yepchat.py +0 -0
  50. {webscout-3.6 → webscout-3.8}/webscout/Provider/Youchat.py +0 -0
  51. {webscout-3.6 → webscout-3.8}/webscout/Provider/__init__.py +0 -0
  52. {webscout-3.6 → webscout-3.8}/webscout/__main__.py +0 -0
  53. {webscout-3.6 → webscout-3.8}/webscout/async_providers.py +0 -0
  54. {webscout-3.6 → webscout-3.8}/webscout/cli.py +0 -0
  55. {webscout-3.6 → webscout-3.8}/webscout/exceptions.py +0 -0
  56. {webscout-3.6 → webscout-3.8}/webscout/g4f.py +0 -0
  57. {webscout-3.6 → webscout-3.8}/webscout/models.py +0 -0
  58. {webscout-3.6 → webscout-3.8}/webscout/tempid.py +0 -0
  59. {webscout-3.6 → webscout-3.8}/webscout/transcriber.py +0 -0
  60. {webscout-3.6 → webscout-3.8}/webscout/utils.py +0 -0
  61. {webscout-3.6 → webscout-3.8}/webscout/voice.py +0 -0
  62. {webscout-3.6 → webscout-3.8}/webscout/webai.py +0 -0
  63. {webscout-3.6 → webscout-3.8}/webscout/webscout_search.py +0 -0
  64. {webscout-3.6 → webscout-3.8}/webscout/webscout_search_async.py +0 -0
  65. {webscout-3.6 → webscout-3.8}/webscout/websx_search.py +0 -0
  66. {webscout-3.6 → webscout-3.8}/webscout.egg-info/dependency_links.txt +0 -0
  67. {webscout-3.6 → webscout-3.8}/webscout.egg-info/entry_points.txt +0 -0
  68. {webscout-3.6 → webscout-3.8}/webscout.egg-info/top_level.txt +0 -0
@@ -1,7 +1,7 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: webscout
3
- Version: 3.6
4
- Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs
3
+ Version: 3.8
4
+ Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
5
5
  Author: OEvortex
6
6
  Author-email: helpingai5@gmail.com
7
7
  License: HelpingAI
@@ -62,7 +62,7 @@ Provides-Extra: local
62
62
  Requires-Dist: llama-cpp-python; extra == "local"
63
63
  Requires-Dist: colorama; extra == "local"
64
64
  Requires-Dist: numpy; extra == "local"
65
- Requires-Dist: huggingface_hub; extra == "local"
65
+ Requires-Dist: huggingface_hub[cli]; extra == "local"
66
66
 
67
67
  <div align="center">
68
68
  <!-- Replace `#` with your actual links -->
@@ -1447,13 +1447,19 @@ while True:
1447
1447
  # Print the response
1448
1448
  print("AI: ", response)
1449
1449
  ```
1450
- ### `Local-LLM` webscout can now run GGUF models
1451
- Local LLM's some functions are taken from easy-llama
1450
+
1451
+ ## Local-LLM
1452
+
1453
+ Webscout can now run GGUF models locally. You can download and run your favorite models with minimal configuration.
1454
+
1455
+ **Example:**
1456
+
1452
1457
  ```python
1453
1458
  from webscout.Local.utils import download_model
1454
1459
  from webscout.Local.model import Model
1455
1460
  from webscout.Local.thread import Thread
1456
1461
  from webscout.Local import formats
1462
+
1457
1463
  # 1. Download the model
1458
1464
  repo_id = "microsoft/Phi-3-mini-4k-instruct-gguf" # Replace with the desired Hugging Face repo
1459
1465
  filename = "Phi-3-mini-4k-instruct-q4.gguf" # Replace with the correct filename
@@ -1469,7 +1475,11 @@ thread = Thread(model, formats.phi3)
1469
1475
  thread.interact()
1470
1476
  ```
1471
1477
 
1472
- ### `Local-rawdog`
1478
+ ## Local-rawdog
1479
+ Webscout's local raw-dog feature allows you to run Python scripts within your terminal prompt.
1480
+
1481
+ **Example:**
1482
+
1473
1483
  ```python
1474
1484
  import webscout.Local as ws
1475
1485
  from webscout.Local.rawdog import RawDog
@@ -1556,6 +1566,63 @@ while True:
1556
1566
  print(script_output)
1557
1567
 
1558
1568
  ```
1569
+
1570
+ ## GGUF
1571
+
1572
+ Webscout provides tools to convert and quantize Hugging Face models into the GGUF format for use with offline LLMs.
1573
+
1574
+ **Example:**
1575
+
1576
+ ```python
1577
+ from webscout import gguf
1578
+ """
1579
+ Valid quantization methods:
1580
+ "q2_k", "q3_k_l", "q3_k_m", "q3_k_s",
1581
+ "q4_0", "q4_1", "q4_k_m", "q4_k_s",
1582
+ "q5_0", "q5_1", "q5_k_m", "q5_k_s",
1583
+ "q6_k", "q8_0"
1584
+ """
1585
+ gguf.convert(
1586
+ model_id="OEvortex/HelpingAI-Lite-1.5T", # Replace with your model ID
1587
+ username="Abhaykoul", # Replace with your Hugging Face username
1588
+ token="hf_token_write", # Replace with your Hugging Face token
1589
+ quantization_methods="q4_k_m" # Optional, adjust quantization methods
1590
+ )
1591
+ ```
1592
+
1593
+ ## Autollama
1594
+
1595
+ Webscout's `autollama` utility downloads a model from Hugging Face and automatically makes it Ollama-ready.
1596
+
1597
+ **Example:**
1598
+
1599
+ ```python
1600
+ from webscout import autollama
1601
+
1602
+ autollama(
1603
+ model_path="OEvortex/HelpingAI-Lite-1.5T", # Hugging Face model ID
1604
+ gguf_file="HelpingAI-Lite-1.5T.q4_k_m.gguf" # GGUF file ID
1605
+ )
1606
+ ```
1607
+
1608
+ **Command Line Usage:**
1609
+
1610
+ * **GGUF Conversion:**
1611
+ ```bash
1612
+ python -m webscout.Extra.gguf -m "OEvortex/HelpingAI-Lite-1.5T" -u "your_username" -t "your_hf_token" -q "q4_k_m,q5_k_m"
1613
+ ```
1614
+
1615
+ * **Autollama:**
1616
+ ```bash
1617
+ python -m webscout.Extra.autollama -m "OEvortex/HelpingAI-Lite-1.5T" -g "HelpingAI-Lite-1.5T.q4_k_m.gguf"
1618
+ ```
1619
+
1620
+ **Note:**
1621
+
1622
+ * Replace `"your_username"` and `"your_hf_token"` with your actual Hugging Face credentials.
1623
+ * The `model_path` in `autollama` is the Hugging Face model ID, and `gguf_file` is the GGUF file ID.
1624
+
1625
+
1559
1626
  ### `LLM` with internet
1560
1627
  ```python
1561
1628
  from __future__ import annotations
@@ -1381,13 +1381,19 @@ while True:
1381
1381
  # Print the response
1382
1382
  print("AI: ", response)
1383
1383
  ```
1384
- ### `Local-LLM` webscout can now run GGUF models
1385
- Local LLM's some functions are taken from easy-llama
1384
+
1385
+ ## Local-LLM
1386
+
1387
+ Webscout can now run GGUF models locally. You can download and run your favorite models with minimal configuration.
1388
+
1389
+ **Example:**
1390
+
1386
1391
  ```python
1387
1392
  from webscout.Local.utils import download_model
1388
1393
  from webscout.Local.model import Model
1389
1394
  from webscout.Local.thread import Thread
1390
1395
  from webscout.Local import formats
1396
+
1391
1397
  # 1. Download the model
1392
1398
  repo_id = "microsoft/Phi-3-mini-4k-instruct-gguf" # Replace with the desired Hugging Face repo
1393
1399
  filename = "Phi-3-mini-4k-instruct-q4.gguf" # Replace with the correct filename
@@ -1403,7 +1409,11 @@ thread = Thread(model, formats.phi3)
1403
1409
  thread.interact()
1404
1410
  ```
1405
1411
 
1406
- ### `Local-rawdog`
1412
+ ## Local-rawdog
1413
+ Webscout's local raw-dog feature allows you to run Python scripts within your terminal prompt.
1414
+
1415
+ **Example:**
1416
+
1407
1417
  ```python
1408
1418
  import webscout.Local as ws
1409
1419
  from webscout.Local.rawdog import RawDog
@@ -1490,6 +1500,63 @@ while True:
1490
1500
  print(script_output)
1491
1501
 
1492
1502
  ```
1503
+
1504
+ ## GGUF
1505
+
1506
+ Webscout provides tools to convert and quantize Hugging Face models into the GGUF format for use with offline LLMs.
1507
+
1508
+ **Example:**
1509
+
1510
+ ```python
1511
+ from webscout import gguf
1512
+ """
1513
+ Valid quantization methods:
1514
+ "q2_k", "q3_k_l", "q3_k_m", "q3_k_s",
1515
+ "q4_0", "q4_1", "q4_k_m", "q4_k_s",
1516
+ "q5_0", "q5_1", "q5_k_m", "q5_k_s",
1517
+ "q6_k", "q8_0"
1518
+ """
1519
+ gguf.convert(
1520
+ model_id="OEvortex/HelpingAI-Lite-1.5T", # Replace with your model ID
1521
+ username="Abhaykoul", # Replace with your Hugging Face username
1522
+ token="hf_token_write", # Replace with your Hugging Face token
1523
+ quantization_methods="q4_k_m" # Optional, adjust quantization methods
1524
+ )
1525
+ ```
1526
+
1527
+ ## Autollama
1528
+
1529
+ Webscout's `autollama` utility downloads a model from Hugging Face and automatically makes it Ollama-ready.
1530
+
1531
+ **Example:**
1532
+
1533
+ ```python
1534
+ from webscout import autollama
1535
+
1536
+ autollama(
1537
+ model_path="OEvortex/HelpingAI-Lite-1.5T", # Hugging Face model ID
1538
+ gguf_file="HelpingAI-Lite-1.5T.q4_k_m.gguf" # GGUF file ID
1539
+ )
1540
+ ```
1541
+
1542
+ **Command Line Usage:**
1543
+
1544
+ * **GGUF Conversion:**
1545
+ ```bash
1546
+ python -m webscout.Extra.gguf -m "OEvortex/HelpingAI-Lite-1.5T" -u "your_username" -t "your_hf_token" -q "q4_k_m,q5_k_m"
1547
+ ```
1548
+
1549
+ * **Autollama:**
1550
+ ```bash
1551
+ python -m webscout.Extra.autollama -m "OEvortex/HelpingAI-Lite-1.5T" -g "HelpingAI-Lite-1.5T.q4_k_m.gguf"
1552
+ ```
1553
+
1554
+ **Note:**
1555
+
1556
+ * Replace `"your_username"` and `"your_hf_token"` with your actual Hugging Face credentials.
1557
+ * The `model_path` in `autollama` is the Hugging Face model ID, and `gguf_file` is the GGUF file ID.
1558
+
1559
+
1493
1560
  ### `LLM` with internet
1494
1561
  ```python
1495
1562
  from __future__ import annotations
@@ -5,8 +5,8 @@ with open("README.md", encoding="utf-8") as f:
 
  setup(
  name="webscout",
- version="3.6",
- description="Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs",
+ version="3.8",
+ description="Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more",
  long_description=README,
  long_description_content_type="text/markdown",
  author="OEvortex",
@@ -75,7 +75,7 @@ setup(
  'llama-cpp-python',
  'colorama',
  'numpy',
- 'huggingface_hub',
+ 'huggingface_hub[cli]',
  ],
  },
  license="HelpingAI",
@@ -0,0 +1,2 @@
+ from .gguf import *
+ from .autollama import *
@@ -0,0 +1,198 @@
1
+ import subprocess
2
+ import argparse
3
+ import os
4
+
5
+ def autollama(model_path, gguf_file):
6
+ """Manages models with Ollama using the autollama.sh script.
7
+
8
+ Args:
9
+ model_path (str): The path to the Hugging Face model.
10
+ gguf_file (str): The name of the GGUF file.
11
+ """
12
+
13
+ # Check if autollama.sh exists in the current working directory
14
+ script_path = os.path.join(os.getcwd(), "autollama.sh")
15
+ if not os.path.exists(script_path):
16
+ # Create autollama.sh with the content provided
17
+ with open(script_path, "w") as f:
18
+ f.write("""
19
+ function show_art() {
20
+ cat << "EOF"
21
+ Made with love in India
22
+ EOF
23
+ }
24
+
25
+ show_art
26
+
27
+ # Initialize default values
28
+ MODEL_PATH=""
29
+ GGUF_FILE=""
30
+
31
+ # Display help/usage information
32
+ usage() {
33
+ echo "Usage: $0 -m <model_path> -g <gguf_file>"
34
+ echo
35
+ echo "Options:"
36
+ echo " -m <model_path> Set the path to the model"
37
+ echo " -g <gguf_file> Set the GGUF file name"
38
+ echo " -h Display this help and exit"
39
+ echo
40
+ }
41
+
42
+ # Parse command-line options
43
+ while getopts ":m:g:h" opt; do
44
+ case ${opt} in
45
+ m )
46
+ MODEL_PATH=$OPTARG
47
+ ;;
48
+ g )
49
+ GGUF_FILE=$OPTARG
50
+ ;;
51
+ h )
52
+ usage
53
+ exit 0
54
+ ;;
55
+ \? )
56
+ echo "Invalid Option: -$OPTARG" 1>&2
57
+ usage
58
+ exit 1
59
+ ;;
60
+ : )
61
+ echo "Invalid Option: -$OPTARG requires an argument" 1>&2
62
+ usage
63
+ exit 1
64
+ ;;
65
+ esac
66
+ done
67
+
68
+ # Check required parameters
69
+ if [ -z "$MODEL_PATH" ] || [ -z "$GGUF_FILE" ]; then
70
+ echo "Error: -m (model_path) and -g (gguf_file) are required."
71
+ usage
72
+ exit 1
73
+ fi
74
+
75
+ # Derive MODEL_NAME
76
+ MODEL_NAME=$(echo $GGUF_FILE | sed 's/\(.*\)\.Q4.*/\\1/')
77
+
78
+ # Log file where downloaded models are recorded
79
+ DOWNLOAD_LOG="downloaded_models.log"
80
+
81
+ # Composite logging name
82
+ LOGGING_NAME="${MODEL_PATH}_${MODEL_NAME}"
83
+
84
+ # Check if the model has been downloaded
85
+ function is_model_downloaded {
86
+ grep -qxF "$LOGGING_NAME" "$DOWNLOAD_LOG" && return 0 || return 1
87
+ }
88
+
89
+ # Log the downloaded model
90
+ function log_downloaded_model {
91
+ echo "$LOGGING_NAME" >> "$DOWNLOAD_LOG"
92
+ }
93
+
94
+ # Function to check if the model has already been created
95
+ function is_model_created {
96
+ # 'ollama list' lists all models
97
+ ollama list | grep -q "$MODEL_NAME" && return 0 || return 1
98
+ }
99
+
100
+ # Check if huggingface-hub is installed, and install it if not
101
+ if ! pip show huggingface-hub > /dev/null; then
102
+ echo "Installing huggingface-hub..."
103
+ pip install -U "huggingface_hub[cli]"
104
+ else
105
+ echo "huggingface-hub is already installed."
106
+ fi
107
+
108
+ # Check if the model has already been downloaded
109
+ if is_model_downloaded; then
110
+ echo "Model $LOGGING_NAME has already been downloaded. Skipping download."
111
+ else
112
+ echo "Downloading model $LOGGING_NAME..."
113
+ # Download the model
114
+ huggingface-cli download $MODEL_PATH $GGUF_FILE --local-dir downloads --local-dir-use-symlinks False
115
+
116
+ # Log the downloaded model
117
+ log_downloaded_model
118
+ echo "Model $LOGGING_NAME downloaded and logged."
119
+ fi
120
+
121
+ # Check if Ollama is installed, and install it if not
122
+ if ! command -v ollama &> /dev/null; then
123
+ echo "Installing Ollama..."
124
+ curl -fsSL https://ollama.com/install.sh | sh
125
+ else
126
+ echo "Ollama is already installed."
127
+ fi
128
+
129
+ # Check if Ollama is already running
130
+ if pgrep -f 'ollama serve' > /dev/null; then
131
+ echo "Ollama is already running. Skipping the start."
132
+ else
133
+ echo "Starting Ollama..."
134
+ # Start Ollama in the background
135
+ ollama serve &
136
+
137
+ # Wait for Ollama to start
138
+ while true; do
139
+ if pgrep -f 'ollama serve' > /dev/null; then
140
+ echo "Ollama has started."
141
+ sleep 60
142
+ break
143
+ else
144
+ echo "Waiting for Ollama to start..."
145
+ sleep 1 # Wait for 1 second before checking again
146
+ fi
147
+ done
148
+ fi
149
+
150
+ # Check if the model has already been created
151
+ if is_model_created; then
152
+ echo "Model $MODEL_NAME is already created. Skipping creation."
153
+ else
154
+ echo "Creating model $MODEL_NAME..."
155
+ # Create the model in Ollama
156
+ # Prepare Modelfile with the downloaded path
157
+ echo "FROM ./downloads/$GGUF_FILE" > Modelfile
158
+ ollama create $MODEL_NAME -f Modelfile
159
+ echo "Model $MODEL_NAME created."
160
+ fi
161
+
162
+
163
+ echo "model name is > $MODEL_NAME"
164
+ echo "Use Ollama run $MODEL_NAME"
165
+ """)
166
+ # Make autollama.sh executable (using chmod)
167
+ os.chmod(script_path, 0o755)
168
+
169
+ # Initialize command list
170
+ command = ["bash", script_path, "-m", model_path, "-g", gguf_file]
171
+
172
+ # Execute the command
173
+ process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
174
+
175
+ # Print the output and error in real-time
176
+ for line in process.stdout:
177
+ print(line, end='')
178
+
179
+ for line in process.stderr:
180
+ print(line, end='')
181
+
182
+ process.wait()
183
+
184
+ def main():
185
+ parser = argparse.ArgumentParser(description='Automatically download a GGUF model and create it in Ollama')
186
+ parser.add_argument('-m', '--model_path', required=True, help='Set the Hugging Face model ID')
187
+ parser.add_argument('-g', '--gguf_file', required=True, help='Set the GGUF file name')
188
+ args = parser.parse_args()
189
+
190
+ try:
191
+ autollama(args.model_path, args.gguf_file)
192
+ except Exception as e:
193
+ print(f"Error: {e}")
194
+ exit(1)
195
+
196
+ if __name__ == "__main__":
197
+ main()
198
+
@@ -0,0 +1,240 @@
1
+ # webscout/Extra/gguf.py
2
+ import subprocess
3
+ import argparse
4
+ import os
5
+
6
+ def convert(model_id, username=None, token=None, quantization_methods="q4_k_m,q5_k_m"):
7
+ """Converts and quantizes a Hugging Face model to GGUF format.
8
+
9
+ Args:
10
+ model_id (str): The Hugging Face model ID (e.g., 'google/flan-t5-xl').
11
+ username (str, optional): Your Hugging Face username. Required for uploads.
12
+ token (str, optional): Your Hugging Face API token. Required for uploads.
13
+ quantization_methods (str, optional): Comma-separated quantization methods.
14
+ Defaults to "q4_k_m,q5_k_m".
15
+
16
+ Raises:
17
+ ValueError: If an invalid quantization method is provided.
18
+ """
19
+
20
+ # List of valid quantization methods
21
+ valid_methods = [
22
+ "q2_k", "q3_k_l", "q3_k_m", "q3_k_s",
23
+ "q4_0", "q4_1", "q4_k_m", "q4_k_s",
24
+ "q5_0", "q5_1", "q5_k_m", "q5_k_s",
25
+ "q6_k", "q8_0"
26
+ ]
27
+
28
+ # Validate the selected quantization methods
29
+ selected_methods_list = quantization_methods.split(',')
30
+ for method in selected_methods_list:
31
+ if method not in valid_methods:
32
+ raise ValueError(f"Invalid method: {method}. Please select from the available methods: {', '.join(valid_methods)}")
33
+
34
+ # Construct the absolute path to the shell script
35
+ script_path = os.path.join(os.getcwd(), "gguf.sh")
36
+ if not os.path.exists(script_path):
37
+ # Create autollama.sh with the content provided
38
+ with open(script_path, "w") as f:
39
+ f.write("""
40
+ cat << "EOF"
41
+ Made with love in India
42
+ EOF
43
+
44
+ # Default values
45
+ MODEL_ID=""
46
+ USERNAME=""
47
+ TOKEN=""
48
+ QUANTIZATION_METHODS="q4_k_m,q5_k_m" # Default to "q4_k_m,q5_k_m" if not provided
49
+
50
+ # Display help/usage information
51
+ usage() {
52
+ echo "Usage: $0 -m MODEL_ID [-u USERNAME] [-t TOKEN] [-q QUANTIZATION_METHODS]"
53
+ echo
54
+ echo "Options:"
55
+ echo " -m MODEL_ID Required: Set the HF model ID"
56
+ echo " -u USERNAME Optional: Set the username"
57
+ echo " -t TOKEN Optional: Set the token"
58
+ echo " -q QUANTIZATION_METHODS Optional: Set the quantization methods (default: q4_k_m,q5_k_m)"
59
+ echo " -h Display this help and exit"
60
+ echo
61
+ }
62
+
63
+ # Parse command-line options
64
+ while getopts ":m:u:t:q:h" opt; do
65
+ case ${opt} in
66
+ m )
67
+ MODEL_ID=$OPTARG
68
+ ;;
69
+ u )
70
+ USERNAME=$OPTARG
71
+ ;;
72
+ t )
73
+ TOKEN=$OPTARG
74
+ ;;
75
+ q )
76
+ QUANTIZATION_METHODS=$OPTARG
77
+ ;;
78
+ h )
79
+ usage
80
+ exit 0
81
+ ;;
82
+ \? )
83
+ echo "Invalid Option: -$OPTARG" 1>&2
84
+ usage
85
+ exit 1
86
+ ;;
87
+ : )
88
+ echo "Invalid Option: -$OPTARG requires an argument" 1>&2
89
+ usage
90
+ exit 1
91
+ ;;
92
+ esac
93
+ done
94
+ shift $((OPTIND -1))
95
+
96
+ # Ensure MODEL_ID is provided
97
+ if [ -z "$MODEL_ID" ]; then
98
+ echo "Error: MODEL_ID is required."
99
+ usage
100
+ exit 1
101
+ fi
102
+
103
+ # # Echoing the arguments for checking
104
+ # echo "MODEL_ID: $MODEL_ID"
105
+ # echo "USERNAME: ${USERNAME:-'Not provided'}"
106
+ # echo "TOKEN: ${TOKEN:-'Not provided'}"
107
+ # echo "QUANTIZATION_METHODS: $QUANTIZATION_METHODS"
108
+
109
+ # Splitting string into an array for quantization methods, if provided
110
+ IFS=',' read -r -a QUANTIZATION_METHOD_ARRAY <<< "$QUANTIZATION_METHODS"
111
+ echo "Quantization Methods: ${QUANTIZATION_METHOD_ARRAY[@]}"
112
+
113
+ MODEL_NAME=$(echo "$MODEL_ID" | awk -F'/' '{print $NF}')
114
+
115
+
116
+ # ----------- llama.cpp setup block-----------
117
+ # Check if llama.cpp is already installed and skip the build step if it is
118
+ if [ ! -d "llama.cpp" ]; then
119
+ echo "llama.cpp not found. Cloning and setting up..."
120
+ git clone https://github.com/ggerganov/llama.cpp
121
+ cd llama.cpp && git pull
122
+ # Install required packages
123
+ pip3 install -r requirements.txt
124
+ # Build llama.cpp as it's freshly cloned
125
+ if ! command -v nvcc &> /dev/null
126
+ then
127
+ echo "nvcc could not be found, building llama without LLAMA_CUBLAS"
128
+ make clean && make
129
+ else
130
+ make clean && LLAMA_CUBLAS=1 make
131
+ fi
132
+ cd ..
133
+ else
134
+ echo "llama.cpp found. Assuming it's already built and up to date."
135
+ # Optionally, still update dependencies
136
+ # cd llama.cpp && pip3 install -r requirements.txt && cd ..
137
+ fi
138
+ # ----------- llama.cpp setup block-----------
139
+
140
+
141
+
142
+
143
+ # Download model
144
+ #todo : shall we put condition to check if model has been already downloaded? similar to autogguf?
145
+ echo "Downloading the model..."
146
+ huggingface-cli download "$MODEL_ID" --local-dir "./${MODEL_NAME}" --local-dir-use-symlinks False --revision main
147
+
148
+
149
+ # Convert to fp16
150
+ FP16="${MODEL_NAME}/${MODEL_NAME,,}.fp16.bin"
151
+ echo "Converting the model to fp16..."
152
+ python3 llama.cpp/convert-hf-to-gguf.py "$MODEL_NAME" --outtype f16 --outfile "$FP16"
153
+
154
+ # Quantize the model
155
+ echo "Quantizing the model..."
156
+ for METHOD in "${QUANTIZATION_METHOD_ARRAY[@]}"; do
157
+ QTYPE="${MODEL_NAME}/${MODEL_NAME,,}.${METHOD^^}.gguf"
158
+ ./llama.cpp/llama-quantize "$FP16" "$QTYPE" "$METHOD"
159
+ done
160
+
161
+
162
+ # Check if USERNAME and TOKEN are provided
163
+ if [[ -n "$USERNAME" && -n "$TOKEN" ]]; then
164
+
165
+ # Login to Hugging Face
166
+ echo "Logging in to Hugging Face..."
167
+ huggingface-cli login --token "$TOKEN"
168
+
169
+
170
+ # Uploading .gguf, .md files, and config.json
171
+ echo "Uploading .gguf, .md files, and config.json..."
172
+
173
+
174
+ # Define a temporary directory
175
+ TEMP_DIR="./temp_upload_dir"
176
+
177
+ # Create the temporary directory
178
+ mkdir -p "${TEMP_DIR}"
179
+
180
+ # Copy the specific files to the temporary directory
181
+ find "./${MODEL_NAME}" -type f \( -name "*.gguf" -o -name "*.md" -o -name "config.json" \) -exec cp {} "${TEMP_DIR}/" \;
182
+
183
+ # Upload the temporary directory to Hugging Face
184
+ huggingface-cli upload "${USERNAME}/${MODEL_NAME}-GGUF" "${TEMP_DIR}" --private
185
+
186
+ # Remove the temporary directory after upload
187
+ rm -rf "${TEMP_DIR}"
188
+ echo "Upload completed."
189
+ else
190
+ echo "USERNAME and TOKEN must be provided for upload."
191
+ fi
192
+
193
+ echo "Script completed."
194
+ """)
195
+ # Make autollama.sh executable (using chmod)
196
+ os.chmod(script_path, 0o755)
197
+
198
+ # Construct the command
199
+ command = ["bash", script_path, "-m", model_id]
200
+
201
+ if username:
202
+ command.extend(["-u", username])
203
+
204
+ if token:
205
+ command.extend(["-t", token])
206
+
207
+ if quantization_methods:
208
+ command.extend(["-q", quantization_methods])
209
+
210
+ # Execute the command
211
+ process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
212
+
213
+ # Print the output and error in real-time
214
+ for line in process.stdout:
215
+ print(line, end='')
216
+
217
+ for line in process.stderr:
218
+ print(line, end='')
219
+
220
+ process.wait()
221
+
222
+
223
+ def main():
224
+ parser = argparse.ArgumentParser(description='Convert and quantize model using gguf.sh')
225
+ parser.add_argument('-m', '--model_id', required=True, help='Set the HF model ID (e.g., "google/flan-t5-xl")')
226
+ parser.add_argument('-u', '--username', help='Set your Hugging Face username (required for uploads)')
227
+ parser.add_argument('-t', '--token', help='Set your Hugging Face API token (required for uploads)')
228
+ parser.add_argument('-q', '--quantization_methods', default="q4_k_m,q5_k_m",
229
+ help='Comma-separated quantization methods (default: q4_k_m,q5_k_m). Valid methods: q2_k, q3_k_l, q3_k_m, q3_k_s, q4_0, q4_1, q4_k_m, q4_k_s, q5_0, q5_1, q5_k_m, q5_k_s, q6_k, q8_0')
230
+
231
+ args = parser.parse_args()
232
+
233
+ try:
234
+ convert(args.model_id, args.username, args.token, args.quantization_methods)
235
+ except ValueError as e:
236
+ print(e)
237
+ exit(1)
238
+
239
+ if __name__ == "__main__":
240
+ main()
@@ -1,3 +1,3 @@
  from llama_cpp import __version__ as __llama_cpp_version__
 
- __version__ = '3.4'
+ __version__ = '3.7'
@@ -11,7 +11,8 @@ from .LLM import LLM
  import g4f
  # Import provider classes for direct access
  from .Provider import *
-
+ from .Extra import gguf
+ from .Extra import autollama
  __repo__ = "https://github.com/OE-LUCIFER/Webscout"
 
  webai = [
@@ -0,0 +1,2 @@
+ __version__ = "3.7"
+
@@ -1,7 +1,7 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: webscout
3
- Version: 3.6
4
- Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs
3
+ Version: 3.8
4
+ Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
5
5
  Author: OEvortex
6
6
  Author-email: helpingai5@gmail.com
7
7
  License: HelpingAI
@@ -62,7 +62,7 @@ Provides-Extra: local
62
62
  Requires-Dist: llama-cpp-python; extra == "local"
63
63
  Requires-Dist: colorama; extra == "local"
64
64
  Requires-Dist: numpy; extra == "local"
65
- Requires-Dist: huggingface_hub; extra == "local"
65
+ Requires-Dist: huggingface_hub[cli]; extra == "local"
66
66
 
67
67
  <div align="center">
68
68
  <!-- Replace `#` with your actual links -->
@@ -1447,13 +1447,19 @@ while True:
1447
1447
  # Print the response
1448
1448
  print("AI: ", response)
1449
1449
  ```
1450
- ### `Local-LLM` webscout can now run GGUF models
1451
- Local LLM's some functions are taken from easy-llama
1450
+
1451
+ ## Local-LLM
1452
+
1453
+ Webscout can now run GGUF models locally. You can download and run your favorite models with minimal configuration.
1454
+
1455
+ **Example:**
1456
+
1452
1457
  ```python
1453
1458
  from webscout.Local.utils import download_model
1454
1459
  from webscout.Local.model import Model
1455
1460
  from webscout.Local.thread import Thread
1456
1461
  from webscout.Local import formats
1462
+
1457
1463
  # 1. Download the model
1458
1464
  repo_id = "microsoft/Phi-3-mini-4k-instruct-gguf" # Replace with the desired Hugging Face repo
1459
1465
  filename = "Phi-3-mini-4k-instruct-q4.gguf" # Replace with the correct filename
@@ -1469,7 +1475,11 @@ thread = Thread(model, formats.phi3)
1469
1475
  thread.interact()
1470
1476
  ```
1471
1477
 
1472
- ### `Local-rawdog`
1478
+ ## Local-rawdog
1479
+ Webscout's local raw-dog feature allows you to run Python scripts within your terminal prompt.
1480
+
1481
+ **Example:**
1482
+
1473
1483
  ```python
1474
1484
  import webscout.Local as ws
1475
1485
  from webscout.Local.rawdog import RawDog
@@ -1556,6 +1566,63 @@ while True:
1556
1566
  print(script_output)
1557
1567
 
1558
1568
  ```
1569
+
1570
+ ## GGUF
1571
+
1572
+ Webscout provides tools to convert and quantize Hugging Face models into the GGUF format for use with offline LLMs.
1573
+
1574
+ **Example:**
1575
+
1576
+ ```python
1577
+ from webscout import gguf
1578
+ """
1579
+ Valid quantization methods:
1580
+ "q2_k", "q3_k_l", "q3_k_m", "q3_k_s",
1581
+ "q4_0", "q4_1", "q4_k_m", "q4_k_s",
1582
+ "q5_0", "q5_1", "q5_k_m", "q5_k_s",
1583
+ "q6_k", "q8_0"
1584
+ """
1585
+ gguf.convert(
1586
+ model_id="OEvortex/HelpingAI-Lite-1.5T", # Replace with your model ID
1587
+ username="Abhaykoul", # Replace with your Hugging Face username
1588
+ token="hf_token_write", # Replace with your Hugging Face token
1589
+ quantization_methods="q4_k_m" # Optional, adjust quantization methods
1590
+ )
1591
+ ```
1592
+
1593
+ ## Autollama
1594
+
1595
+ Webscout's `autollama` utility downloads a model from Hugging Face and automatically makes it Ollama-ready.
1596
+
1597
+ **Example:**
1598
+
1599
+ ```python
1600
+ from webscout import autollama
1601
+
1602
+ autollama(
1603
+ model_path="OEvortex/HelpingAI-Lite-1.5T", # Hugging Face model ID
1604
+ gguf_file="HelpingAI-Lite-1.5T.q4_k_m.gguf" # GGUF file ID
1605
+ )
1606
+ ```
1607
+
1608
+ **Command Line Usage:**
1609
+
1610
+ * **GGUF Conversion:**
1611
+ ```bash
1612
+ python -m webscout.Extra.gguf -m "OEvortex/HelpingAI-Lite-1.5T" -u "your_username" -t "your_hf_token" -q "q4_k_m,q5_k_m"
1613
+ ```
1614
+
1615
+ * **Autollama:**
1616
+ ```bash
1617
+ python -m webscout.Extra.autollama -m "OEvortex/HelpingAI-Lite-1.5T" -g "HelpingAI-Lite-1.5T.q4_k_m.gguf"
1618
+ ```
1619
+
1620
+ **Note:**
1621
+
1622
+ * Replace `"your_username"` and `"your_hf_token"` with your actual Hugging Face credentials.
1623
+ * The `model_path` in `autollama` is the Hugging Face model ID, and `gguf_file` is the GGUF file ID.
1624
+
1625
+
1559
1626
  ### `LLM` with internet
1560
1627
  ```python
1561
1628
  from __future__ import annotations
@@ -28,6 +28,9 @@ webscout.egg-info/dependency_links.txt
  webscout.egg-info/entry_points.txt
  webscout.egg-info/requires.txt
  webscout.egg-info/top_level.txt
+ webscout/Extra/__init__.py
+ webscout/Extra/autollama.py
+ webscout/Extra/gguf.py
  webscout/Local/__init__.py
  webscout/Local/_version.py
  webscout/Local/formats.py
@@ -38,4 +38,4 @@ pytest>=7.4.2
  llama-cpp-python
  colorama
  numpy
- huggingface_hub
+ huggingface_hub[cli]
@@ -1,2 +0,0 @@
- __version__ = "3.4"
-