webscout 3.7-py3-none-any.whl → 3.8-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/Extra/autollama.py +158 -7
- webscout/Extra/gguf.py +164 -4
- webscout/__init__.py +2 -1
- {webscout-3.7.dist-info → webscout-3.8.dist-info}/METADATA +2 -2
- {webscout-3.7.dist-info → webscout-3.8.dist-info}/RECORD +9 -9
- {webscout-3.7.dist-info → webscout-3.8.dist-info}/LICENSE.md +0 -0
- {webscout-3.7.dist-info → webscout-3.8.dist-info}/WHEEL +0 -0
- {webscout-3.7.dist-info → webscout-3.8.dist-info}/entry_points.txt +0 -0
- {webscout-3.7.dist-info → webscout-3.8.dist-info}/top_level.txt +0 -0
webscout/Extra/autollama.py
CHANGED
@@ -10,11 +10,161 @@ def autollama(model_path, gguf_file):
         gguf_file (str): The name of the GGUF file.
     """
 
-    #
-
+    # Check if autollama.sh exists in the current working directory
+    script_path = os.path.join(os.getcwd(), "autollama.sh")
+    if not os.path.exists(script_path):
+        # Create autollama.sh with the content provided
+        with open(script_path, "w") as f:
+            f.write("""
+function show_art() {
+cat << "EOF"
+Made with love in India
+EOF
+}
 
-
-
+show_art
+
+# Initialize default values
+MODEL_PATH=""
+GGUF_FILE=""
+
+# Display help/usage information
+usage() {
+echo "Usage: $0 -m <model_path> -g <gguf_file>"
+echo
+echo "Options:"
+echo " -m <model_path> Set the path to the model"
+echo " -g <gguf_file> Set the GGUF file name"
+echo " -h Display this help and exit"
+echo
+}
+
+# Parse command-line options
+while getopts ":m:g:h" opt; do
+case ${opt} in
+m )
+MODEL_PATH=$OPTARG
+;;
+g )
+GGUF_FILE=$OPTARG
+;;
+h )
+usage
+exit 0
+;;
+\? )
+echo "Invalid Option: -$OPTARG" 1>&2
+usage
+exit 1
+;;
+: )
+echo "Invalid Option: -$OPTARG requires an argument" 1>&2
+usage
+exit 1
+;;
+esac
+done
+
+# Check required parameters
+if [ -z "$MODEL_PATH" ] || [ -z "$GGUF_FILE" ]; then
+echo "Error: -m (model_path) and -g (gguf_file) are required."
+usage
+exit 1
+fi
+
+# Derive MODEL_NAME
+MODEL_NAME=$(echo $GGUF_FILE | sed 's/\(.*\)\.Q4.*/\\1/')
+
+# Log file where downloaded models are recorded
+DOWNLOAD_LOG="downloaded_models.log"
+
+# Composite logging name
+LOGGING_NAME="${MODEL_PATH}_${MODEL_NAME}"
+
+# Check if the model has been downloaded
+function is_model_downloaded {
+grep -qxF "$LOGGING_NAME" "$DOWNLOAD_LOG" && return 0 || return 1
+}
+
+# Log the downloaded model
+function log_downloaded_model {
+echo "$LOGGING_NAME" >> "$DOWNLOAD_LOG"
+}
+
+# Function to check if the model has already been created
+function is_model_created {
+# 'ollama list' lists all models
+ollama list | grep -q "$MODEL_NAME" && return 0 || return 1
+}
+
+# Check if huggingface-hub is installed, and install it if not
+if ! pip show huggingface-hub > /dev/null; then
+echo "Installing huggingface-hub..."
+pip install -U "huggingface_hub[cli]"
+else
+echo "huggingface-hub is already installed."
+fi
+
+# Check if the model has already been downloaded
+if is_model_downloaded; then
+echo "Model $LOGGING_NAME has already been downloaded. Skipping download."
+else
+echo "Downloading model $LOGGING_NAME..."
+# Download the model
+huggingface-cli download $MODEL_PATH $GGUF_FILE --local-dir downloads --local-dir-use-symlinks False
+
+# Log the downloaded model
+log_downloaded_model
+echo "Model $LOGGING_NAME downloaded and logged."
+fi
+
+# Check if Ollama is installed, and install it if not
+if ! command -v ollama &> /dev/null; then
+echo "Installing Ollama..."
+curl -fsSL https://ollama.com/install.sh | sh
+else
+echo "Ollama is already installed."
+fi
+
+# Check if Ollama is already running
+if pgrep -f 'ollama serve' > /dev/null; then
+echo "Ollama is already running. Skipping the start."
+else
+echo "Starting Ollama..."
+# Start Ollama in the background
+ollama serve &
+
+# Wait for Ollama to start
+while true; do
+if pgrep -f 'ollama serve' > /dev/null; then
+echo "Ollama has started."
+sleep 60
+break
+else
+echo "Waiting for Ollama to start..."
+sleep 1 # Wait for 1 second before checking again
+fi
+done
+fi
+
+# Check if the model has already been created
+if is_model_created; then
+echo "Model $MODEL_NAME is already created. Skipping creation."
+else
+echo "Creating model $MODEL_NAME..."
+# Create the model in Ollama
+# Prepare Modelfile with the downloaded path
+echo "FROM ./downloads/$GGUF_FILE" > Modelfile
+ollama create $MODEL_NAME -f Modelfile
+echo "Model $MODEL_NAME created."
+fi
+
+
+echo "model name is > $MODEL_NAME"
+echo "Use Ollama run $MODEL_NAME"
+""")
+        # Make autollama.sh executable (using chmod)
+        os.chmod(script_path, 0o755)
 
     # Initialize command list
     command = ["bash", script_path, "-m", model_path, "-g", gguf_file]
@@ -32,8 +182,8 @@ def autollama(model_path, gguf_file):
     process.wait()
 
 def main():
-    parser = argparse.ArgumentParser(description='
-    parser.add_argument('-m', '--model_path', required=True, help='Set the
+    parser = argparse.ArgumentParser(description='Automatically create and run an Ollama model in Ollama')
+    parser.add_argument('-m', '--model_path', required=True, help='Set the hunggingface model id to the Hugging Face model')
     parser.add_argument('-g', '--gguf_file', required=True, help='Set the GGUF file name')
     args = parser.parse_args()
 
@@ -44,4 +194,5 @@ def main():
         exit(1)
 
 if __name__ == "__main__":
-    main()
+    main()
+
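For reference, the 3.8 README drives this helper through the package's public entry point; a minimal sketch of that call, assuming webscout 3.8 is installed and the `ollama` and `huggingface-cli` binaries used by the generated script are on PATH (model id and file name are the example values from the package's own README):

from webscout.Extra.autollama import autollama

# Downloads the GGUF file from Hugging Face, then creates and registers the model in Ollama
autollama(
    model_path="OEvortex/HelpingAI-Lite-1.5T",
    gguf_file="HelpingAI-Lite-1.5T.q4_k_m.gguf",
)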
webscout/Extra/gguf.py
CHANGED
@@ -1,3 +1,4 @@
+# webscout/Extra/gguf.py
 import subprocess
 import argparse
 import os
@@ -30,11 +31,169 @@ def convert(model_id, username=None, token=None, quantization_methods="q4_k_m,q5
         if method not in valid_methods:
             raise ValueError(f"Invalid method: {method}. Please select from the available methods: {', '.join(valid_methods)}")
 
-
-
+    # Construct the absolute path to the shell script
+    script_path = os.path.join(os.getcwd(), "gguf.sh")
+    if not os.path.exists(script_path):
+        # Create autollama.sh with the content provided
+        with open(script_path, "w") as f:
+            f.write("""
+cat << "EOF"
+Made with love in India
+EOF
 
-
-
+# Default values
+MODEL_ID=""
+USERNAME=""
+TOKEN=""
+QUANTIZATION_METHODS="q4_k_m,q5_k_m" # Default to "q4_k_m,q5_k_m" if not provided
+
+# Display help/usage information
+usage() {
+echo "Usage: $0 -m MODEL_ID [-u USERNAME] [-t TOKEN] [-q QUANTIZATION_METHODS]"
+echo
+echo "Options:"
+echo " -m MODEL_ID Required: Set the HF model ID"
+echo " -u USERNAME Optional: Set the username"
+echo " -t TOKEN Optional: Set the token"
+echo " -q QUANTIZATION_METHODS Optional: Set the quantization methods (default: q4_k_m,q5_k_m)"
+echo " -h Display this help and exit"
+echo
+}
+
+# Parse command-line options
+while getopts ":m:u:t:q:h" opt; do
+case ${opt} in
+m )
+MODEL_ID=$OPTARG
+;;
+u )
+USERNAME=$OPTARG
+;;
+t )
+TOKEN=$OPTARG
+;;
+q )
+QUANTIZATION_METHODS=$OPTARG
+;;
+h )
+usage
+exit 0
+;;
+\? )
+echo "Invalid Option: -$OPTARG" 1>&2
+usage
+exit 1
+;;
+: )
+echo "Invalid Option: -$OPTARG requires an argument" 1>&2
+usage
+exit 1
+;;
+esac
+done
+shift $((OPTIND -1))
+
+# Ensure MODEL_ID is provided
+if [ -z "$MODEL_ID" ]; then
+echo "Error: MODEL_ID is required."
+usage
+exit 1
+fi
+
+# # Echoing the arguments for checking
+# echo "MODEL_ID: $MODEL_ID"
+# echo "USERNAME: ${USERNAME:-'Not provided'}"
+# echo "TOKEN: ${TOKEN:-'Not provided'}"
+# echo "QUANTIZATION_METHODS: $QUANTIZATION_METHODS"
+
+# Splitting string into an array for quantization methods, if provided
+IFS=',' read -r -a QUANTIZATION_METHOD_ARRAY <<< "$QUANTIZATION_METHODS"
+echo "Quantization Methods: ${QUANTIZATION_METHOD_ARRAY[@]}"
+
+MODEL_NAME=$(echo "$MODEL_ID" | awk -F'/' '{print $NF}')
+
+
+# ----------- llama.cpp setup block-----------
+# Check if llama.cpp is already installed and skip the build step if it is
+if [ ! -d "llama.cpp" ]; then
+echo "llama.cpp not found. Cloning and setting up..."
+git clone https://github.com/ggerganov/llama.cpp
+cd llama.cpp && git pull
+# Install required packages
+pip3 install -r requirements.txt
+# Build llama.cpp as it's freshly cloned
+if ! command -v nvcc &> /dev/null
+then
+echo "nvcc could not be found, building llama without LLAMA_CUBLAS"
+make clean && make
+else
+make clean && LLAMA_CUBLAS=1 make
+fi
+cd ..
+else
+echo "llama.cpp found. Assuming it's already built and up to date."
+# Optionally, still update dependencies
+# cd llama.cpp && pip3 install -r requirements.txt && cd ..
+fi
+# ----------- llama.cpp setup block-----------
+
+
+
+
+# Download model
+#todo : shall we put condition to check if model has been already downloaded? similar to autogguf?
+echo "Downloading the model..."
+huggingface-cli download "$MODEL_ID" --local-dir "./${MODEL_NAME}" --local-dir-use-symlinks False --revision main
+
+
+# Convert to fp16
+FP16="${MODEL_NAME}/${MODEL_NAME,,}.fp16.bin"
+echo "Converting the model to fp16..."
+python3 llama.cpp/convert-hf-to-gguf.py "$MODEL_NAME" --outtype f16 --outfile "$FP16"
+
+# Quantize the model
+echo "Quantizing the model..."
+for METHOD in "${QUANTIZATION_METHOD_ARRAY[@]}"; do
+QTYPE="${MODEL_NAME}/${MODEL_NAME,,}.${METHOD^^}.gguf"
+./llama.cpp/llama-quantize "$FP16" "$QTYPE" "$METHOD"
+done
+
+
+# Check if USERNAME and TOKEN are provided
+if [[ -n "$USERNAME" && -n "$TOKEN" ]]; then
+
+# Login to Hugging Face
+echo "Logging in to Hugging Face..."
+huggingface-cli login --token "$TOKEN"
+
+
+# Uploading .gguf, .md files, and config.json
+echo "Uploading .gguf, .md files, and config.json..."
+
+
+# Define a temporary directory
+TEMP_DIR="./temp_upload_dir"
+
+# Create the temporary directory
+mkdir -p "${TEMP_DIR}"
+
+# Copy the specific files to the temporary directory
+find "./${MODEL_NAME}" -type f \( -name "*.gguf" -o -name "*.md" -o -name "config.json" \) -exec cp {} "${TEMP_DIR}/" \;
+
+# Upload the temporary directory to Hugging Face
+huggingface-cli upload "${USERNAME}/${MODEL_NAME}-GGUF" "${TEMP_DIR}" --private
+
+# Remove the temporary directory after upload
+rm -rf "${TEMP_DIR}"
+echo "Upload completed."
+else
+echo "USERNAME and TOKEN must be provided for upload."
+fi
+
+echo "Script completed."
+""")
+        # Make autollama.sh executable (using chmod)
+        os.chmod(script_path, 0o755)
 
     # Construct the command
     command = ["bash", script_path, "-m", model_id]
@@ -60,6 +219,7 @@ def convert(model_id, username=None, token=None, quantization_methods="q4_k_m,q5
 
     process.wait()
 
+
 def main():
     parser = argparse.ArgumentParser(description='Convert and quantize model using gguf.sh')
     parser.add_argument('-m', '--model_id', required=True, help='Set the HF model ID (e.g., "google/flan-t5-xl")')
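The `convert` function wraps the generated gguf.sh in the same way; a minimal sketch of calling it from Python, assuming the signature shown in the hunk headers above (the model id here is an illustrative placeholder, and username/token are only needed for the optional upload step):

from webscout.Extra.gguf import convert

# Clones and builds llama.cpp if needed, downloads the model, converts it to fp16,
# then writes one .gguf file per requested quantization method.
convert(
    model_id="OEvortex/HelpingAI-Lite-1.5T",
    quantization_methods="q4_k_m,q5_k_m",
)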
webscout/__init__.py
CHANGED
@@ -11,7 +11,8 @@ from .LLM import LLM
 import g4f
 # Import provider classes for direct access
 from .Provider import *
-from .Extra import
+from .Extra import gguf
+from .Extra import autollama
 __repo__ = "https://github.com/OE-LUCIFER/Webscout"
 
 webai = [
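With the two new imports above, both modules become reachable from the package root; a small sketch of what that enables (module names taken directly from the changed lines):

from webscout import autollama, gguf

# autollama and gguf are the webscout.Extra modules re-exported by __init__.py
print(autollama.__name__, gguf.__name__)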
{webscout-3.7.dist-info → webscout-3.8.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 3.
+Version: 3.8
 Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
 Author: OEvortex
 Author-email: helpingai5@gmail.com
@@ -1599,7 +1599,7 @@ Webscout's `autollama` utility download model from huggingface and then automati
 ```python
 from webscout import autollama
 
-autollama
+autollama(
     model_path="OEvortex/HelpingAI-Lite-1.5T", # Hugging Face model ID
     gguf_file="HelpingAI-Lite-1.5T.q4_k_m.gguf" # GGUF file ID
 )
{webscout-3.7.dist-info → webscout-3.8.dist-info}/RECORD
CHANGED

@@ -3,7 +3,7 @@ webscout/AIbase.py,sha256=GoHbN8r0gq2saYRZv6LA-Fr9Jlcjv80STKFXUq2ZeGU,4710
 webscout/AIutel.py,sha256=MMfUvTQXYDtaFXsXtwKgv9V_qMK6WgOxdx7Wagdm2Lw,33542
 webscout/DWEBS.py,sha256=QLuT1IKu0lnwdl7W6c-ctBAO7Jj0Zk3PYm6-13BC7rU,25740
 webscout/LLM.py,sha256=LbGCZdJf8A5dwfoGS4tyy39tAh5BDdhMZP0ScKaaQfU,4184
-webscout/__init__.py,sha256=
+webscout/__init__.py,sha256=427p-hwpbqr0RdzzkvRAPYfSCAyiPte-JeHnUWG_nHA,2091
 webscout/__main__.py,sha256=ZtTRgsRjUi2JOvYFLF1ZCh55Sdoz94I-BS-TlJC7WDU,126
 webscout/async_providers.py,sha256=holBv5SxanxVXc_92CBBaXHlB2IakB_fHnhyZaFjYF8,684
 webscout/cli.py,sha256=enw_dPTCG3sNC1TXt96XccnpRmF4Etr99nh-RbGYags,18784
@@ -20,8 +20,8 @@ webscout/webscout_search.py,sha256=lFAot1-Qil_YfXieeLakDVDEX8Ckcima4ueXdOYwiMc,4
 webscout/webscout_search_async.py,sha256=dooKGwLm0cwTml55Vy6NHPPY-nymEqX2h8laX94Zg5A,14537
 webscout/websx_search.py,sha256=n-qVwiHozJEF-GFRPcAfh4k1d_tscTmDe1dNL-1ngcU,12094
 webscout/Extra/__init__.py,sha256=vlW4RoSl5v3d7j_Yq1XEMydrG9JM-On_afgK-HtRZsk,45
-webscout/Extra/autollama.py,sha256=
-webscout/Extra/gguf.py,sha256=
+webscout/Extra/autollama.py,sha256=DDdnb1tKEZWJaADVn9GXTZkMSwLKCcUGIjMKNlOBtK8,5419
+webscout/Extra/gguf.py,sha256=5zTNE5HxM_VQ5ONoocL8GG5fRXrgyLdEEjNzndG0oUw,7811
 webscout/Local/__init__.py,sha256=RN6klpbabPGNX2YzPm_hdeUcQvieUwvJt22uAO2RKSM,238
 webscout/Local/_version.py,sha256=3sFn1tDa2mT9Pb1-OGW4K3_zbiJ0mhPRqB2rnLfp28Q,83
 webscout/Local/formats.py,sha256=BiZZSoN3e8S6-S-ykBL9ogSUs0vK11GaZ3ghc9U8GRk,18994
@@ -54,9 +54,9 @@ webscout/Provider/Xjai.py,sha256=BIlk2ouz9Kh_0Gg9hPvTqhI7XtcmWdg5vHSX_4uGrIs,903
 webscout/Provider/Yepchat.py,sha256=2Eit-A7w1ph1GQKNQuur_yaDzI64r0yBGxCIjDefJxQ,19875
 webscout/Provider/Youchat.py,sha256=UVGBuGSjv4uRibn1xflmCjYcfrRTKnDvX3adhag6T98,7976
 webscout/Provider/__init__.py,sha256=RaMdtYv7eQJ2vB8jXUHrkfNbx2DgRjbwc6DI40cOH1A,1809
-webscout-3.
-webscout-3.
-webscout-3.
-webscout-3.
-webscout-3.
-webscout-3.
+webscout-3.8.dist-info/LICENSE.md,sha256=9P0imsudI7MEvZe2pOcg8rKBn6E5FGHQ-riYozZI-Bk,2942
+webscout-3.8.dist-info/METADATA,sha256=hCQOSbTlIUBKkcTP9W8hPxuo0yXhkMbRHi7E0bSg4S4,69105
+webscout-3.8.dist-info/WHEEL,sha256=cpQTJ5IWu9CdaPViMhC9YzF8gZuS5-vlfoFihTBC86A,91
+webscout-3.8.dist-info/entry_points.txt,sha256=Hh4YIIjvkqB9SVxZ2ri4DZUkgEu_WF_5_r_nZDIvfG8,73
+webscout-3.8.dist-info/top_level.txt,sha256=nYIw7OKBQDr_Z33IzZUKidRD3zQEo8jOJYkMVMeN334,9
+webscout-3.8.dist-info/RECORD,,
{webscout-3.7.dist-info → webscout-3.8.dist-info}/LICENSE.md
File without changes

{webscout-3.7.dist-info → webscout-3.8.dist-info}/WHEEL
File without changes

{webscout-3.7.dist-info → webscout-3.8.dist-info}/entry_points.txt
File without changes

{webscout-3.7.dist-info → webscout-3.8.dist-info}/top_level.txt
File without changes