webscout-6.0-py3-none-any.whl → webscout-6.2b0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- webscout/AIauto.py +77 -259
- webscout/Agents/Onlinesearcher.py +22 -10
- webscout/Agents/functioncall.py +2 -2
- webscout/Bard.py +21 -21
- webscout/Extra/autollama.py +37 -20
- webscout/Local/__init__.py +6 -7
- webscout/Local/formats.py +404 -194
- webscout/Local/model.py +1074 -477
- webscout/Local/samplers.py +108 -144
- webscout/Local/thread.py +251 -410
- webscout/Local/ui.py +401 -0
- webscout/Local/utils.py +338 -136
- webscout/Provider/Amigo.py +51 -38
- webscout/Provider/Deepseek.py +7 -6
- webscout/Provider/EDITEE.py +2 -2
- webscout/Provider/GPTWeb.py +1 -1
- webscout/Provider/NinjaChat.py +200 -0
- webscout/Provider/OLLAMA.py +1 -1
- webscout/Provider/Perplexity.py +1 -1
- webscout/Provider/Reka.py +12 -5
- webscout/Provider/TTI/AIuncensored.py +103 -0
- webscout/Provider/TTI/Nexra.py +3 -3
- webscout/Provider/TTI/__init__.py +3 -2
- webscout/Provider/TTI/aiforce.py +2 -2
- webscout/Provider/TTI/imgninza.py +136 -0
- webscout/Provider/TeachAnything.py +0 -3
- webscout/Provider/Youchat.py +1 -1
- webscout/Provider/__init__.py +12 -11
- webscout/Provider/{ChatHub.py → aimathgpt.py} +72 -88
- webscout/Provider/cerebras.py +125 -118
- webscout/Provider/cleeai.py +1 -1
- webscout/Provider/felo_search.py +1 -1
- webscout/Provider/gaurish.py +207 -0
- webscout/Provider/geminiprorealtime.py +160 -0
- webscout/Provider/genspark.py +1 -1
- webscout/Provider/julius.py +8 -3
- webscout/Provider/learnfastai.py +1 -1
- webscout/Provider/promptrefine.py +3 -1
- webscout/Provider/turboseek.py +3 -8
- webscout/Provider/tutorai.py +1 -1
- webscout/__init__.py +2 -43
- webscout/exceptions.py +5 -1
- webscout/tempid.py +4 -73
- webscout/utils.py +3 -0
- webscout/version.py +1 -1
- webscout/webai.py +1 -1
- webscout/webscout_search.py +154 -123
- {webscout-6.0.dist-info → webscout-6.2b0.dist-info}/METADATA +156 -236
- {webscout-6.0.dist-info → webscout-6.2b0.dist-info}/RECORD +53 -54
- webscout/Local/rawdog.py +0 -946
- webscout/Provider/BasedGPT.py +0 -214
- webscout/Provider/TTI/amigo.py +0 -148
- webscout/Provider/aigames.py +0 -213
- webscout/Provider/bixin.py +0 -264
- webscout/Provider/xdash.py +0 -182
- webscout/websx_search.py +0 -19
- {webscout-6.0.dist-info → webscout-6.2b0.dist-info}/LICENSE.md +0 -0
- {webscout-6.0.dist-info → webscout-6.2b0.dist-info}/WHEEL +0 -0
- {webscout-6.0.dist-info → webscout-6.2b0.dist-info}/entry_points.txt +0 -0
- {webscout-6.0.dist-info → webscout-6.2b0.dist-info}/top_level.txt +0 -0
webscout/Extra/autollama.py
CHANGED

```diff
@@ -5,10 +5,10 @@ import sys
 import subprocess
 import logging
 import psutil
-from huggingface_hub import
+from huggingface_hub import hf_hub_download  # Updated import
 import colorlog
-import ollama
-import argparse
+import ollama
+import argparse
 
 # Suppress specific warnings
 warnings.filterwarnings(
@@ -36,7 +36,6 @@ if not logger.hasHandlers():
     logger.addHandler(handler)
     logger.setLevel(logging.INFO)
 
-# Redirect warnings to the logger but avoid duplication
 logging.captureWarnings(True)
 py_warnings_logger = logging.getLogger("py.warnings")
 if not py_warnings_logger.hasHandlers():
@@ -76,11 +75,37 @@ def is_model_created(model_name):
 
 
 def download_model(repo_id, filename, token, cache_dir="downloads"):
-
-
-
-
-
+    """
+    Downloads a model file from the Hugging Face Hub using hf_hub_download.
+    """
+    try:
+        os.makedirs(cache_dir, exist_ok=True)
+
+        # Download using hf_hub_download
+        filepath = hf_hub_download(
+            repo_id=repo_id,
+            filename=filename,
+            token=token,
+            cache_dir=cache_dir,
+            resume_download=True,
+            force_download=False,
+            local_files_only=False
+        )
+
+        # Ensure file is in the expected location
+        expected_path = os.path.join(cache_dir, filename)
+        if filepath != expected_path:
+            os.makedirs(os.path.dirname(expected_path), exist_ok=True)
+            if not os.path.exists(expected_path):
+                import shutil
+                shutil.copy2(filepath, expected_path)
+            filepath = expected_path
+
+        return filepath
+
+    except Exception as e:
+        logger.error(f"Error downloading model: {str(e)}")
+        raise
 
 
 def is_ollama_running():
@@ -90,16 +115,14 @@ def is_ollama_running():
     return False
 
 
-def main(model_path=None, gguf_file=None):
+def main(model_path=None, gguf_file=None):
     show_art()
 
-    # Parse command-line arguments if provided
     parser = argparse.ArgumentParser(description="Download and create an Ollama model")
     parser.add_argument("-m", "--model_path", help="Path to the model on Hugging Face Hub")
     parser.add_argument("-g", "--gguf_file", help="Name of the GGUF file")
     args = parser.parse_args()
 
-    # Use arguments from command line or function parameters
     model_path = args.model_path if args.model_path else model_path
     gguf_file = args.gguf_file if args.gguf_file else gguf_file
 
@@ -112,12 +135,10 @@ def main(model_path=None, gguf_file=None): # Modified to handle both CLI and no
     download_log = "downloaded_models.log"
     logging_name = f"{model_path}_{model_name}"
 
-    # Ensure the log file exists
     if not os.path.exists(download_log):
         with open(download_log, 'w') as f:
             pass
 
-    # Check if huggingface-hub is installed, and install it if not
     try:
         subprocess.check_output(['pip', 'show', 'huggingface-hub'])
     except subprocess.CalledProcessError:
@@ -126,7 +147,6 @@ def main(model_path=None, gguf_file=None): # Modified to handle both CLI and no
     else:
         logger.info("huggingface-hub is already installed.")
 
-    # Check if the model has already been downloaded
     if is_model_downloaded(logging_name, download_log):
         logger.info(f"Model {logging_name} has already been downloaded. Skipping download.")
     else:
@@ -134,13 +154,11 @@ def main(model_path=None, gguf_file=None): # Modified to handle both CLI and no
         token = os.getenv('HUGGINGFACE_TOKEN', None)
         if not token:
             logger.warning("Warning: HUGGINGFACE_TOKEN environment variable is not set. Using None.")
-
-
+
         filepath = download_model(model_path, gguf_file, token)
         log_downloaded_model(logging_name, download_log)
         logger.info(f"Model {logging_name} downloaded and logged.")
 
-    # Check if Ollama is installed, and install it if not
     try:
         subprocess.check_output(['ollama', '--version'])
     except subprocess.CalledProcessError:
@@ -149,7 +167,6 @@ def main(model_path=None, gguf_file=None): # Modified to handle both CLI and no
     else:
         logger.info("Ollama is already installed.")
 
-    # Check if Ollama is already running
     if is_ollama_running():
         logger.info("Ollama is already running. Skipping the start.")
     else:
@@ -162,7 +179,6 @@ def main(model_path=None, gguf_file=None): # Modified to handle both CLI and no
 
     logger.info("Ollama has started.")
 
-    # Check if the model has already been created
     if is_model_created(model_name):
         logger.info(f"Model {model_name} is already created. Skipping creation.")
     else:
@@ -175,5 +191,6 @@ def main(model_path=None, gguf_file=None): # Modified to handle both CLI and no
     logger.info(f"model name is > {model_name}")
     logger.info(f"Use Ollama run {model_name}")
 
+
 if __name__ == "__main__":
     main()
```
webscout/Local/__init__.py
CHANGED

```diff
@@ -2,10 +2,9 @@
 from ._version import __version__, __llama_cpp_version__
 
 
-from . import
-from . import
-from . import
-
-from .model import
-from .thread import
-from .rawdog import *
+from .utils import *
+from .samplers import *
+from .formats import *
+from .ui import *
+from .model import *
+from .thread import *
```