webscout 3.7.tar.gz → 3.8.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (69)
  1. {webscout-3.7/webscout.egg-info → webscout-3.8}/PKG-INFO +2 -2
  2. {webscout-3.7 → webscout-3.8}/README.md +1 -1
  3. {webscout-3.7 → webscout-3.8}/setup.py +1 -1
  4. webscout-3.8/webscout/Extra/autollama.py +198 -0
  5. webscout-3.8/webscout/Extra/gguf.py +240 -0
  6. {webscout-3.7 → webscout-3.8}/webscout/__init__.py +2 -1
  7. {webscout-3.7 → webscout-3.8/webscout.egg-info}/PKG-INFO +2 -2
  8. webscout-3.7/webscout/Extra/autollama.py +0 -47
  9. webscout-3.7/webscout/Extra/gguf.py +0 -80
  10. {webscout-3.7 → webscout-3.8}/LICENSE.md +0 -0
  11. {webscout-3.7 → webscout-3.8}/setup.cfg +0 -0
  12. {webscout-3.7 → webscout-3.8}/webscout/AIauto.py +0 -0
  13. {webscout-3.7 → webscout-3.8}/webscout/AIbase.py +0 -0
  14. {webscout-3.7 → webscout-3.8}/webscout/AIutel.py +0 -0
  15. {webscout-3.7 → webscout-3.8}/webscout/DWEBS.py +0 -0
  16. {webscout-3.7 → webscout-3.8}/webscout/Extra/__init__.py +0 -0
  17. {webscout-3.7 → webscout-3.8}/webscout/LLM.py +0 -0
  18. {webscout-3.7 → webscout-3.8}/webscout/Local/__init__.py +0 -0
  19. {webscout-3.7 → webscout-3.8}/webscout/Local/_version.py +0 -0
  20. {webscout-3.7 → webscout-3.8}/webscout/Local/formats.py +0 -0
  21. {webscout-3.7 → webscout-3.8}/webscout/Local/model.py +0 -0
  22. {webscout-3.7 → webscout-3.8}/webscout/Local/rawdog.py +0 -0
  23. {webscout-3.7 → webscout-3.8}/webscout/Local/samplers.py +0 -0
  24. {webscout-3.7 → webscout-3.8}/webscout/Local/thread.py +0 -0
  25. {webscout-3.7 → webscout-3.8}/webscout/Local/utils.py +0 -0
  26. {webscout-3.7 → webscout-3.8}/webscout/Provider/BasedGPT.py +0 -0
  27. {webscout-3.7 → webscout-3.8}/webscout/Provider/Berlin4h.py +0 -0
  28. {webscout-3.7 → webscout-3.8}/webscout/Provider/Blackboxai.py +0 -0
  29. {webscout-3.7 → webscout-3.8}/webscout/Provider/ChatGPTUK.py +0 -0
  30. {webscout-3.7 → webscout-3.8}/webscout/Provider/Cohere.py +0 -0
  31. {webscout-3.7 → webscout-3.8}/webscout/Provider/Deepinfra.py +0 -0
  32. {webscout-3.7 → webscout-3.8}/webscout/Provider/Deepseek.py +0 -0
  33. {webscout-3.7 → webscout-3.8}/webscout/Provider/Gemini.py +0 -0
  34. {webscout-3.7 → webscout-3.8}/webscout/Provider/Groq.py +0 -0
  35. {webscout-3.7 → webscout-3.8}/webscout/Provider/Koboldai.py +0 -0
  36. {webscout-3.7 → webscout-3.8}/webscout/Provider/Leo.py +0 -0
  37. {webscout-3.7 → webscout-3.8}/webscout/Provider/Llama2.py +0 -0
  38. {webscout-3.7 → webscout-3.8}/webscout/Provider/OpenGPT.py +0 -0
  39. {webscout-3.7 → webscout-3.8}/webscout/Provider/Openai.py +0 -0
  40. {webscout-3.7 → webscout-3.8}/webscout/Provider/Perplexity.py +0 -0
  41. {webscout-3.7 → webscout-3.8}/webscout/Provider/Phind.py +0 -0
  42. {webscout-3.7 → webscout-3.8}/webscout/Provider/Poe.py +0 -0
  43. {webscout-3.7 → webscout-3.8}/webscout/Provider/Reka.py +0 -0
  44. {webscout-3.7 → webscout-3.8}/webscout/Provider/ThinkAnyAI.py +0 -0
  45. {webscout-3.7 → webscout-3.8}/webscout/Provider/VTLchat.py +0 -0
  46. {webscout-3.7 → webscout-3.8}/webscout/Provider/Xjai.py +0 -0
  47. {webscout-3.7 → webscout-3.8}/webscout/Provider/Yepchat.py +0 -0
  48. {webscout-3.7 → webscout-3.8}/webscout/Provider/Youchat.py +0 -0
  49. {webscout-3.7 → webscout-3.8}/webscout/Provider/__init__.py +0 -0
  50. {webscout-3.7 → webscout-3.8}/webscout/__main__.py +0 -0
  51. {webscout-3.7 → webscout-3.8}/webscout/async_providers.py +0 -0
  52. {webscout-3.7 → webscout-3.8}/webscout/cli.py +0 -0
  53. {webscout-3.7 → webscout-3.8}/webscout/exceptions.py +0 -0
  54. {webscout-3.7 → webscout-3.8}/webscout/g4f.py +0 -0
  55. {webscout-3.7 → webscout-3.8}/webscout/models.py +0 -0
  56. {webscout-3.7 → webscout-3.8}/webscout/tempid.py +0 -0
  57. {webscout-3.7 → webscout-3.8}/webscout/transcriber.py +0 -0
  58. {webscout-3.7 → webscout-3.8}/webscout/utils.py +0 -0
  59. {webscout-3.7 → webscout-3.8}/webscout/version.py +0 -0
  60. {webscout-3.7 → webscout-3.8}/webscout/voice.py +0 -0
  61. {webscout-3.7 → webscout-3.8}/webscout/webai.py +0 -0
  62. {webscout-3.7 → webscout-3.8}/webscout/webscout_search.py +0 -0
  63. {webscout-3.7 → webscout-3.8}/webscout/webscout_search_async.py +0 -0
  64. {webscout-3.7 → webscout-3.8}/webscout/websx_search.py +0 -0
  65. {webscout-3.7 → webscout-3.8}/webscout.egg-info/SOURCES.txt +0 -0
  66. {webscout-3.7 → webscout-3.8}/webscout.egg-info/dependency_links.txt +0 -0
  67. {webscout-3.7 → webscout-3.8}/webscout.egg-info/entry_points.txt +0 -0
  68. {webscout-3.7 → webscout-3.8}/webscout.egg-info/requires.txt +0 -0
  69. {webscout-3.7 → webscout-3.8}/webscout.egg-info/top_level.txt +0 -0
--- webscout-3.7/webscout.egg-info/PKG-INFO
+++ webscout-3.8/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 3.7
+Version: 3.8
 Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
 Author: OEvortex
 Author-email: helpingai5@gmail.com
@@ -1599,7 +1599,7 @@ Webscout's `autollama` utility download model from huggingface and then automati
 ```python
 from webscout import autollama
 
-autollama.autollama(
+autollama(
     model_path="OEvortex/HelpingAI-Lite-1.5T", # Hugging Face model ID
     gguf_file="HelpingAI-Lite-1.5T.q4_k_m.gguf" # GGUF file ID
 )
--- webscout-3.7/README.md
+++ webscout-3.8/README.md
@@ -1533,7 +1533,7 @@ Webscout's `autollama` utility download model from huggingface and then automati
 ```python
 from webscout import autollama
 
-autollama.autollama(
+autollama(
     model_path="OEvortex/HelpingAI-Lite-1.5T", # Hugging Face model ID
     gguf_file="HelpingAI-Lite-1.5T.q4_k_m.gguf" # GGUF file ID
 )
--- webscout-3.7/setup.py
+++ webscout-3.8/setup.py
@@ -5,7 +5,7 @@ with open("README.md", encoding="utf-8") as f:
 
 setup(
     name="webscout",
-    version="3.7",
+    version="3.8",
     description="Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more",
     long_description=README,
    long_description_content_type="text/markdown",
--- /dev/null
+++ webscout-3.8/webscout/Extra/autollama.py
@@ -0,0 +1,198 @@
+import subprocess
+import argparse
+import os
+
+def autollama(model_path, gguf_file):
+    """Manages models with Ollama using the autollama.sh script.
+
+    Args:
+        model_path (str): The path to the Hugging Face model.
+        gguf_file (str): The name of the GGUF file.
+    """
+
+    # Check if autollama.sh exists in the current working directory
+    script_path = os.path.join(os.getcwd(), "autollama.sh")
+    if not os.path.exists(script_path):
+        # Create autollama.sh with the content provided
+        with open(script_path, "w") as f:
+            f.write("""
+function show_art() {
+    cat << "EOF"
+Made with love in India
+EOF
+}
+
+show_art
+
+# Initialize default values
+MODEL_PATH=""
+GGUF_FILE=""
+
+# Display help/usage information
+usage() {
+    echo "Usage: $0 -m <model_path> -g <gguf_file>"
+    echo
+    echo "Options:"
+    echo "  -m <model_path>  Set the path to the model"
+    echo "  -g <gguf_file>   Set the GGUF file name"
+    echo "  -h               Display this help and exit"
+    echo
+}
+
+# Parse command-line options
+while getopts ":m:g:h" opt; do
+    case ${opt} in
+        m )
+            MODEL_PATH=$OPTARG
+            ;;
+        g )
+            GGUF_FILE=$OPTARG
+            ;;
+        h )
+            usage
+            exit 0
+            ;;
+        \? )
+            echo "Invalid Option: -$OPTARG" 1>&2
+            usage
+            exit 1
+            ;;
+        : )
+            echo "Invalid Option: -$OPTARG requires an argument" 1>&2
+            usage
+            exit 1
+            ;;
+    esac
+done
+
+# Check required parameters
+if [ -z "$MODEL_PATH" ] || [ -z "$GGUF_FILE" ]; then
+    echo "Error: -m (model_path) and -g (gguf_file) are required."
+    usage
+    exit 1
+fi
+
+# Derive MODEL_NAME
+MODEL_NAME=$(echo $GGUF_FILE | sed 's/\(.*\)\.Q4.*/\\1/')
+
+# Log file where downloaded models are recorded
+DOWNLOAD_LOG="downloaded_models.log"
+
+# Composite logging name
+LOGGING_NAME="${MODEL_PATH}_${MODEL_NAME}"
+
+# Check if the model has been downloaded
+function is_model_downloaded {
+    grep -qxF "$LOGGING_NAME" "$DOWNLOAD_LOG" && return 0 || return 1
+}
+
+# Log the downloaded model
+function log_downloaded_model {
+    echo "$LOGGING_NAME" >> "$DOWNLOAD_LOG"
+}
+
+# Function to check if the model has already been created
+function is_model_created {
+    # 'ollama list' lists all models
+    ollama list | grep -q "$MODEL_NAME" && return 0 || return 1
+}
+
+# Check if huggingface-hub is installed, and install it if not
+if ! pip show huggingface-hub > /dev/null; then
+    echo "Installing huggingface-hub..."
+    pip install -U "huggingface_hub[cli]"
+else
+    echo "huggingface-hub is already installed."
+fi
+
+# Check if the model has already been downloaded
+if is_model_downloaded; then
+    echo "Model $LOGGING_NAME has already been downloaded. Skipping download."
+else
+    echo "Downloading model $LOGGING_NAME..."
+    # Download the model
+    huggingface-cli download $MODEL_PATH $GGUF_FILE --local-dir downloads --local-dir-use-symlinks False
+
+    # Log the downloaded model
+    log_downloaded_model
+    echo "Model $LOGGING_NAME downloaded and logged."
+fi
+
+# Check if Ollama is installed, and install it if not
+if ! command -v ollama &> /dev/null; then
+    echo "Installing Ollama..."
+    curl -fsSL https://ollama.com/install.sh | sh
+else
+    echo "Ollama is already installed."
+fi
+
+# Check if Ollama is already running
+if pgrep -f 'ollama serve' > /dev/null; then
+    echo "Ollama is already running. Skipping the start."
+else
+    echo "Starting Ollama..."
+    # Start Ollama in the background
+    ollama serve &
+
+    # Wait for Ollama to start
+    while true; do
+        if pgrep -f 'ollama serve' > /dev/null; then
+            echo "Ollama has started."
+            sleep 60
+            break
+        else
+            echo "Waiting for Ollama to start..."
+            sleep 1  # Wait for 1 second before checking again
+        fi
+    done
+fi
+
+# Check if the model has already been created
+if is_model_created; then
+    echo "Model $MODEL_NAME is already created. Skipping creation."
+else
+    echo "Creating model $MODEL_NAME..."
+    # Create the model in Ollama
+    # Prepare Modelfile with the downloaded path
+    echo "FROM ./downloads/$GGUF_FILE" > Modelfile
+    ollama create $MODEL_NAME -f Modelfile
+    echo "Model $MODEL_NAME created."
+fi
+
+
+echo "model name is > $MODEL_NAME"
+echo "Use Ollama run $MODEL_NAME"
+""")
+        # Make autollama.sh executable (using chmod)
+        os.chmod(script_path, 0o755)
+
+    # Initialize command list
+    command = ["bash", script_path, "-m", model_path, "-g", gguf_file]
+
+    # Execute the command
+    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
+
+    # Print the output and error in real-time
+    for line in process.stdout:
+        print(line, end='')
+
+    for line in process.stderr:
+        print(line, end='')
+
+    process.wait()
+
+def main():
+    parser = argparse.ArgumentParser(description='Automatically create and run an Ollama model in Ollama')
+    parser.add_argument('-m', '--model_path', required=True, help='Set the Hugging Face model ID')
+    parser.add_argument('-g', '--gguf_file', required=True, help='Set the GGUF file name')
+    args = parser.parse_args()
+
+    try:
+        autollama(args.model_path, args.gguf_file)
+    except Exception as e:
+        print(f"Error: {e}")
+        exit(1)
+
+if __name__ == "__main__":
+    main()
+
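For orientation, this new module is what the README hunks in this release call out to. A minimal usage sketch, mirroring the README's own example (the fully qualified import is used here because `webscout/__init__.py` binds `autollama` to the submodule):

```python
# Usage sketch for the new webscout/Extra/autollama.py, mirroring the
# README example shown in this diff. Requires webscout 3.8, bash, pip,
# and network access; it writes autollama.sh, downloaded_models.log,
# Modelfile, and a downloads/ directory into the current directory.
from webscout.Extra.autollama import autollama

autollama(
    model_path="OEvortex/HelpingAI-Lite-1.5T",    # Hugging Face model ID
    gguf_file="HelpingAI-Lite-1.5T.q4_k_m.gguf",  # GGUF file in that repo
)
```

The call shells out to the generated script, which installs huggingface-hub and Ollama if they are missing, downloads the GGUF file, and registers it with `ollama create`.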
--- /dev/null
+++ webscout-3.8/webscout/Extra/gguf.py
@@ -0,0 +1,240 @@
+# webscout/Extra/gguf.py
+import subprocess
+import argparse
+import os
+
+def convert(model_id, username=None, token=None, quantization_methods="q4_k_m,q5_k_m"):
+    """Converts and quantizes a Hugging Face model to GGUF format.
+
+    Args:
+        model_id (str): The Hugging Face model ID (e.g., 'google/flan-t5-xl').
+        username (str, optional): Your Hugging Face username. Required for uploads.
+        token (str, optional): Your Hugging Face API token. Required for uploads.
+        quantization_methods (str, optional): Comma-separated quantization methods.
+            Defaults to "q4_k_m,q5_k_m".
+
+    Raises:
+        ValueError: If an invalid quantization method is provided.
+    """
+
+    # List of valid quantization methods
+    valid_methods = [
+        "q2_k", "q3_k_l", "q3_k_m", "q3_k_s",
+        "q4_0", "q4_1", "q4_k_m", "q4_k_s",
+        "q5_0", "q5_1", "q5_k_m", "q5_k_s",
+        "q6_k", "q8_0"
+    ]
+
+    # Validate the selected quantization methods
+    selected_methods_list = quantization_methods.split(',')
+    for method in selected_methods_list:
+        if method not in valid_methods:
+            raise ValueError(f"Invalid method: {method}. Please select from the available methods: {', '.join(valid_methods)}")
+
+    # Construct the absolute path to the shell script
+    script_path = os.path.join(os.getcwd(), "gguf.sh")
+    if not os.path.exists(script_path):
+        # Create gguf.sh with the content provided
+        with open(script_path, "w") as f:
+            f.write("""
+cat << "EOF"
+Made with love in India
+EOF
+
+# Default values
+MODEL_ID=""
+USERNAME=""
+TOKEN=""
+QUANTIZATION_METHODS="q4_k_m,q5_k_m" # Default to "q4_k_m,q5_k_m" if not provided
+
+# Display help/usage information
+usage() {
+    echo "Usage: $0 -m MODEL_ID [-u USERNAME] [-t TOKEN] [-q QUANTIZATION_METHODS]"
+    echo
+    echo "Options:"
+    echo "  -m MODEL_ID              Required: Set the HF model ID"
+    echo "  -u USERNAME              Optional: Set the username"
+    echo "  -t TOKEN                 Optional: Set the token"
+    echo "  -q QUANTIZATION_METHODS  Optional: Set the quantization methods (default: q4_k_m,q5_k_m)"
+    echo "  -h                       Display this help and exit"
+    echo
+}
+
+# Parse command-line options
+while getopts ":m:u:t:q:h" opt; do
+    case ${opt} in
+        m )
+            MODEL_ID=$OPTARG
+            ;;
+        u )
+            USERNAME=$OPTARG
+            ;;
+        t )
+            TOKEN=$OPTARG
+            ;;
+        q )
+            QUANTIZATION_METHODS=$OPTARG
+            ;;
+        h )
+            usage
+            exit 0
+            ;;
+        \? )
+            echo "Invalid Option: -$OPTARG" 1>&2
+            usage
+            exit 1
+            ;;
+        : )
+            echo "Invalid Option: -$OPTARG requires an argument" 1>&2
+            usage
+            exit 1
+            ;;
+    esac
+done
+shift $((OPTIND -1))
+
+# Ensure MODEL_ID is provided
+if [ -z "$MODEL_ID" ]; then
+    echo "Error: MODEL_ID is required."
+    usage
+    exit 1
+fi
+
+# # Echoing the arguments for checking
+# echo "MODEL_ID: $MODEL_ID"
+# echo "USERNAME: ${USERNAME:-'Not provided'}"
+# echo "TOKEN: ${TOKEN:-'Not provided'}"
+# echo "QUANTIZATION_METHODS: $QUANTIZATION_METHODS"
+
+# Splitting string into an array for quantization methods, if provided
+IFS=',' read -r -a QUANTIZATION_METHOD_ARRAY <<< "$QUANTIZATION_METHODS"
+echo "Quantization Methods: ${QUANTIZATION_METHOD_ARRAY[@]}"
+
+MODEL_NAME=$(echo "$MODEL_ID" | awk -F'/' '{print $NF}')
+
+
+# ----------- llama.cpp setup block-----------
+# Check if llama.cpp is already installed and skip the build step if it is
+if [ ! -d "llama.cpp" ]; then
+    echo "llama.cpp not found. Cloning and setting up..."
+    git clone https://github.com/ggerganov/llama.cpp
+    cd llama.cpp && git pull
+    # Install required packages
+    pip3 install -r requirements.txt
+    # Build llama.cpp as it's freshly cloned
+    if ! command -v nvcc &> /dev/null
+    then
+        echo "nvcc could not be found, building llama without LLAMA_CUBLAS"
+        make clean && make
+    else
+        make clean && LLAMA_CUBLAS=1 make
+    fi
+    cd ..
+else
+    echo "llama.cpp found. Assuming it's already built and up to date."
+    # Optionally, still update dependencies
+    # cd llama.cpp && pip3 install -r requirements.txt && cd ..
+fi
+# ----------- llama.cpp setup block-----------
+
+
+
+
+# Download model
+#todo : shall we put condition to check if model has been already downloaded? similar to autogguf?
+echo "Downloading the model..."
+huggingface-cli download "$MODEL_ID" --local-dir "./${MODEL_NAME}" --local-dir-use-symlinks False --revision main
+
+
+# Convert to fp16
+FP16="${MODEL_NAME}/${MODEL_NAME,,}.fp16.bin"
+echo "Converting the model to fp16..."
+python3 llama.cpp/convert-hf-to-gguf.py "$MODEL_NAME" --outtype f16 --outfile "$FP16"
+
+# Quantize the model
+echo "Quantizing the model..."
+for METHOD in "${QUANTIZATION_METHOD_ARRAY[@]}"; do
+    QTYPE="${MODEL_NAME}/${MODEL_NAME,,}.${METHOD^^}.gguf"
+    ./llama.cpp/llama-quantize "$FP16" "$QTYPE" "$METHOD"
+done
+
+
+# Check if USERNAME and TOKEN are provided
+if [[ -n "$USERNAME" && -n "$TOKEN" ]]; then
+
+    # Login to Hugging Face
+    echo "Logging in to Hugging Face..."
+    huggingface-cli login --token "$TOKEN"
+
+
+    # Uploading .gguf, .md files, and config.json
+    echo "Uploading .gguf, .md files, and config.json..."
+
+
+    # Define a temporary directory
+    TEMP_DIR="./temp_upload_dir"
+
+    # Create the temporary directory
+    mkdir -p "${TEMP_DIR}"
+
+    # Copy the specific files to the temporary directory
+    find "./${MODEL_NAME}" -type f \( -name "*.gguf" -o -name "*.md" -o -name "config.json" \) -exec cp {} "${TEMP_DIR}/" \;
+
+    # Upload the temporary directory to Hugging Face
+    huggingface-cli upload "${USERNAME}/${MODEL_NAME}-GGUF" "${TEMP_DIR}" --private
+
+    # Remove the temporary directory after upload
+    rm -rf "${TEMP_DIR}"
+    echo "Upload completed."
+else
+    echo "USERNAME and TOKEN must be provided for upload."
+fi
+
+echo "Script completed."
+""")
+        # Make gguf.sh executable (using chmod)
+        os.chmod(script_path, 0o755)
+
+    # Construct the command
+    command = ["bash", script_path, "-m", model_id]
+
+    if username:
+        command.extend(["-u", username])
+
+    if token:
+        command.extend(["-t", token])
+
+    if quantization_methods:
+        command.extend(["-q", quantization_methods])
+
+    # Execute the command
+    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
+
+    # Print the output and error in real-time
+    for line in process.stdout:
+        print(line, end='')
+
+    for line in process.stderr:
+        print(line, end='')
+
+    process.wait()
+
+
+def main():
+    parser = argparse.ArgumentParser(description='Convert and quantize model using gguf.sh')
+    parser.add_argument('-m', '--model_id', required=True, help='Set the HF model ID (e.g., "google/flan-t5-xl")')
+    parser.add_argument('-u', '--username', help='Set your Hugging Face username (required for uploads)')
+    parser.add_argument('-t', '--token', help='Set your Hugging Face API token (required for uploads)')
+    parser.add_argument('-q', '--quantization_methods', default="q4_k_m,q5_k_m",
+                        help='Comma-separated quantization methods (default: q4_k_m,q5_k_m). Valid methods: q2_k, q3_k_l, q3_k_m, q3_k_s, q4_0, q4_1, q4_k_m, q4_k_s, q5_0, q5_1, q5_k_m, q5_k_s, q6_k, q8_0')
+
+    args = parser.parse_args()
+
+    try:
+        convert(args.model_id, args.username, args.token, args.quantization_methods)
+    except ValueError as e:
+        print(e)
+        exit(1)
+
+if __name__ == "__main__":
+    main()
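A corresponding sketch for the converter, derived from the `convert` signature and docstring above; the model ID is the docstring's own example, and the username and token are placeholders:

```python
# Usage sketch for the new webscout/Extra/gguf.py. The username and
# token are placeholders and are only needed for the upload step; an
# invalid quantization method raises ValueError before any work starts.
from webscout.Extra.gguf import convert

# Convert and quantize locally only (no upload):
convert("google/flan-t5-xl", quantization_methods="q4_k_m")

# Convert, quantize, and upload to <username>/<model>-GGUF as a private repo:
convert(
    "google/flan-t5-xl",
    username="your-hf-username",  # placeholder
    token="hf_xxx",               # placeholder Hugging Face API token
    quantization_methods="q4_k_m,q5_k_m",
)
```

Everything heavy (cloning and building llama.cpp, downloading the model, fp16 conversion, quantization, and the optional upload) happens in the generated gguf.sh, so a run also needs git, make, python3, and network access.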
--- webscout-3.7/webscout/__init__.py
+++ webscout-3.8/webscout/__init__.py
@@ -11,7 +11,8 @@ from .LLM import LLM
 import g4f
 # Import provider classes for direct access
 from .Provider import *
-from .Extra import *
+from .Extra import gguf
+from .Extra import autollama
 __repo__ = "https://github.com/OE-LUCIFER/Webscout"
 
 webai = [
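One consequence of this hunk, sketched under the assumption that `webscout/Extra/__init__.py` does not itself re-export the functions under the same names: the two names bound at the top level are the submodules.

```python
# Sketch of the 3.8 top-level namespace, assuming webscout/Extra/__init__.py
# does not re-export the functions themselves:
import webscout

webscout.gguf.convert          # function defined in webscout/Extra/gguf.py
webscout.autollama.autollama   # function defined in webscout/Extra/autollama.py
```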
--- webscout-3.7/PKG-INFO
+++ webscout-3.8/webscout.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 3.7
+Version: 3.8
 Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
 Author: OEvortex
 Author-email: helpingai5@gmail.com
@@ -1599,7 +1599,7 @@ Webscout's `autollama` utility download model from huggingface and then automati
 ```python
 from webscout import autollama
 
-autollama.autollama(
+autollama(
     model_path="OEvortex/HelpingAI-Lite-1.5T", # Hugging Face model ID
     gguf_file="HelpingAI-Lite-1.5T.q4_k_m.gguf" # GGUF file ID
 )
--- webscout-3.7/webscout/Extra/autollama.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import subprocess
-import argparse
-import os
-
-def autollama(model_path, gguf_file):
-    """Manages models with Ollama using the autollama.sh script.
-
-    Args:
-        model_path (str): The path to the Hugging Face model.
-        gguf_file (str): The name of the GGUF file.
-    """
-
-    # Get the directory where this script is located
-    script_dir = os.path.dirname(os.path.abspath(__file__))
-
-    # Construct the path to the shell script
-    script_path = os.path.join(script_dir, "autollama.sh")
-
-    # Initialize command list
-    command = ["bash", script_path, "-m", model_path, "-g", gguf_file]
-
-    # Execute the command
-    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
-
-    # Print the output and error in real-time
-    for line in process.stdout:
-        print(line, end='')
-
-    for line in process.stderr:
-        print(line, end='')
-
-    process.wait()
-
-def main():
-    parser = argparse.ArgumentParser(description='Run autollama.sh to manage models with Ollama')
-    parser.add_argument('-m', '--model_path', required=True, help='Set the path to the Hugging Face model')
-    parser.add_argument('-g', '--gguf_file', required=True, help='Set the GGUF file name')
-    args = parser.parse_args()
-
-    try:
-        autollama(args.model_path, args.gguf_file)
-    except Exception as e:
-        print(f"Error: {e}")
-        exit(1)
-
-if __name__ == "__main__":
-    main()
--- webscout-3.7/webscout/Extra/gguf.py
+++ /dev/null
@@ -1,80 +0,0 @@
-import subprocess
-import argparse
-import os
-
-def convert(model_id, username=None, token=None, quantization_methods="q4_k_m,q5_k_m"):
-    """Converts and quantizes a Hugging Face model to GGUF format.
-
-    Args:
-        model_id (str): The Hugging Face model ID (e.g., 'google/flan-t5-xl').
-        username (str, optional): Your Hugging Face username. Required for uploads.
-        token (str, optional): Your Hugging Face API token. Required for uploads.
-        quantization_methods (str, optional): Comma-separated quantization methods.
-            Defaults to "q4_k_m,q5_k_m".
-
-    Raises:
-        ValueError: If an invalid quantization method is provided.
-    """
-
-    # List of valid quantization methods
-    valid_methods = [
-        "q2_k", "q3_k_l", "q3_k_m", "q3_k_s",
-        "q4_0", "q4_1", "q4_k_m", "q4_k_s",
-        "q5_0", "q5_1", "q5_k_m", "q5_k_s",
-        "q6_k", "q8_0"
-    ]
-
-    # Validate the selected quantization methods
-    selected_methods_list = quantization_methods.split(',')
-    for method in selected_methods_list:
-        if method not in valid_methods:
-            raise ValueError(f"Invalid method: {method}. Please select from the available methods: {', '.join(valid_methods)}")
-
-    # Get the directory where this script is located
-    script_dir = os.path.dirname(os.path.abspath(__file__))
-
-    # Construct the path to the shell script
-    script_path = os.path.join(script_dir, "gguf.sh")
-
-    # Construct the command
-    command = ["bash", script_path, "-m", model_id]
-
-    if username:
-        command.extend(["-u", username])
-
-    if token:
-        command.extend(["-t", token])
-
-    if quantization_methods:
-        command.extend(["-q", quantization_methods])
-
-    # Execute the command
-    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
-
-    # Print the output and error in real-time
-    for line in process.stdout:
-        print(line, end='')
-
-    for line in process.stderr:
-        print(line, end='')
-
-    process.wait()
-
-def main():
-    parser = argparse.ArgumentParser(description='Convert and quantize model using gguf.sh')
-    parser.add_argument('-m', '--model_id', required=True, help='Set the HF model ID (e.g., "google/flan-t5-xl")')
-    parser.add_argument('-u', '--username', help='Set your Hugging Face username (required for uploads)')
-    parser.add_argument('-t', '--token', help='Set your Hugging Face API token (required for uploads)')
-    parser.add_argument('-q', '--quantization_methods', default="q4_k_m,q5_k_m",
-                        help='Comma-separated quantization methods (default: q4_k_m,q5_k_m). Valid methods: q2_k, q3_k_l, q3_k_m, q3_k_s, q4_0, q4_1, q4_k_m, q4_k_s, q5_0, q5_1, q5_k_m, q5_k_s, q6_k, q8_0')
-
-    args = parser.parse_args()
-
-    try:
-        convert(args.model_id, args.username, args.token, args.quantization_methods)
-    except ValueError as e:
-        print(e)
-        exit(1)
-
-if __name__ == "__main__":
-    main()