webscout 6.2b0__py3-none-any.whl → 6.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (97)
  1. webscout/AIauto.py +191 -176
  2. webscout/AIbase.py +112 -239
  3. webscout/AIutel.py +488 -1130
  4. webscout/Agents/functioncall.py +248 -198
  5. webscout/Bing_search.py +250 -153
  6. webscout/DWEBS.py +454 -178
  7. webscout/Extra/__init__.py +2 -1
  8. webscout/Extra/autocoder/__init__.py +9 -0
  9. webscout/Extra/autocoder/autocoder_utiles.py +121 -0
  10. webscout/Extra/autocoder/rawdog.py +681 -0
  11. webscout/Extra/autollama.py +246 -195
  12. webscout/Extra/gguf.py +441 -226
  13. webscout/Extra/weather.py +172 -67
  14. webscout/LLM.py +442 -100
  15. webscout/Litlogger/__init__.py +681 -0
  16. webscout/Local/formats.py +4 -2
  17. webscout/Provider/Amigo.py +19 -10
  18. webscout/Provider/Andi.py +0 -33
  19. webscout/Provider/Blackboxai.py +4 -204
  20. webscout/Provider/DARKAI.py +1 -1
  21. webscout/Provider/EDITEE.py +1 -1
  22. webscout/Provider/Llama3.py +1 -1
  23. webscout/Provider/Marcus.py +137 -0
  24. webscout/Provider/NinjaChat.py +1 -1
  25. webscout/Provider/PI.py +221 -207
  26. webscout/Provider/Perplexity.py +598 -598
  27. webscout/Provider/RoboCoders.py +206 -0
  28. webscout/Provider/TTI/AiForce/__init__.py +22 -0
  29. webscout/Provider/TTI/AiForce/async_aiforce.py +257 -0
  30. webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -0
  31. webscout/Provider/TTI/Nexra/__init__.py +22 -0
  32. webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
  33. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
  34. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
  35. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -0
  36. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -0
  37. webscout/Provider/TTI/__init__.py +3 -4
  38. webscout/Provider/TTI/artbit/__init__.py +22 -0
  39. webscout/Provider/TTI/artbit/async_artbit.py +184 -0
  40. webscout/Provider/TTI/artbit/sync_artbit.py +176 -0
  41. webscout/Provider/TTI/blackbox/__init__.py +4 -0
  42. webscout/Provider/TTI/blackbox/async_blackbox.py +212 -0
  43. webscout/Provider/TTI/{blackboximage.py → blackbox/sync_blackbox.py} +199 -153
  44. webscout/Provider/TTI/deepinfra/__init__.py +4 -0
  45. webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -0
  46. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -0
  47. webscout/Provider/TTI/huggingface/__init__.py +22 -0
  48. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
  49. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
  50. webscout/Provider/TTI/imgninza/__init__.py +4 -0
  51. webscout/Provider/TTI/imgninza/async_ninza.py +214 -0
  52. webscout/Provider/TTI/{imgninza.py → imgninza/sync_ninza.py} +209 -136
  53. webscout/Provider/TTI/talkai/__init__.py +4 -0
  54. webscout/Provider/TTI/talkai/async_talkai.py +229 -0
  55. webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
  56. webscout/Provider/__init__.py +146 -132
  57. webscout/Provider/askmyai.py +158 -0
  58. webscout/Provider/cerebras.py +227 -206
  59. webscout/Provider/geminiapi.py +208 -198
  60. webscout/Provider/llama3mitril.py +180 -0
  61. webscout/Provider/llmchat.py +203 -0
  62. webscout/Provider/mhystical.py +176 -0
  63. webscout/Provider/perplexitylabs.py +265 -0
  64. webscout/Provider/talkai.py +196 -0
  65. webscout/Provider/twitterclone.py +251 -244
  66. webscout/Provider/typegpt.py +359 -0
  67. webscout/__init__.py +28 -23
  68. webscout/__main__.py +5 -5
  69. webscout/cli.py +327 -347
  70. webscout/conversation.py +227 -0
  71. webscout/exceptions.py +161 -29
  72. webscout/litagent/__init__.py +172 -0
  73. webscout/litprinter/__init__.py +831 -0
  74. webscout/optimizers.py +270 -0
  75. webscout/prompt_manager.py +279 -0
  76. webscout/swiftcli/__init__.py +810 -0
  77. webscout/transcriber.py +479 -551
  78. webscout/update_checker.py +125 -0
  79. webscout/version.py +1 -1
  80. webscout-6.4.dist-info/LICENSE.md +211 -0
  81. {webscout-6.2b0.dist-info → webscout-6.4.dist-info}/METADATA +34 -55
  82. webscout-6.4.dist-info/RECORD +154 -0
  83. webscout/Provider/TTI/AIuncensored.py +0 -103
  84. webscout/Provider/TTI/Nexra.py +0 -120
  85. webscout/Provider/TTI/PollinationsAI.py +0 -138
  86. webscout/Provider/TTI/WebSimAI.py +0 -142
  87. webscout/Provider/TTI/aiforce.py +0 -160
  88. webscout/Provider/TTI/artbit.py +0 -141
  89. webscout/Provider/TTI/deepinfra.py +0 -148
  90. webscout/Provider/TTI/huggingface.py +0 -155
  91. webscout/models.py +0 -23
  92. webscout-6.2b0.dist-info/LICENSE.md +0 -50
  93. webscout-6.2b0.dist-info/RECORD +0 -118
  94. /webscout/{g4f.py → gpt4free.py} +0 -0
  95. {webscout-6.2b0.dist-info → webscout-6.4.dist-info}/WHEEL +0 -0
  96. {webscout-6.2b0.dist-info → webscout-6.4.dist-info}/entry_points.txt +0 -0
  97. {webscout-6.2b0.dist-info → webscout-6.4.dist-info}/top_level.txt +0 -0
@@ -1,196 +1,247 @@
- import warnings
- from datetime import time
- import os
- import sys
- import subprocess
- import logging
- import psutil
- from huggingface_hub import hf_hub_download # Updated import
- import colorlog
- import ollama
- import argparse
-
- # Suppress specific warnings
- warnings.filterwarnings(
-     "ignore", category=FutureWarning, module="huggingface_hub.file_download"
- )
-
- # Configure logging with colors
- handler = colorlog.StreamHandler()
- handler.setFormatter(
-     colorlog.ColoredFormatter(
-         "%(log_color)s%(asctime)s - %(levelname)s - %(message)s",
-         datefmt="%Y-%m-%d %H:%M:%S",
-         log_colors={
-             "DEBUG": "cyan",
-             "INFO": "green",
-             "WARNING": "yellow",
-             "ERROR": "red",
-             "CRITICAL": "red,bg_white",
-         },
-     )
- )
-
- logger = colorlog.getLogger(__name__)
- if not logger.hasHandlers():
-     logger.addHandler(handler)
-     logger.setLevel(logging.INFO)
-
- logging.captureWarnings(True)
- py_warnings_logger = logging.getLogger("py.warnings")
- if not py_warnings_logger.hasHandlers():
-     py_warnings_logger.addHandler(handler)
-
-
- def show_art():
-     logger.info("Made with love in India")
-
-
- def usage():
-     logger.info("Usage: python script.py -m <model_path> -g <gguf_file>")
-     logger.info("Options:")
-     logger.info(" -m <model_path> Set the path to the model")
-     logger.info(" -g <gguf_file> Set the GGUF file name")
-     logger.info(" -h Display this help and exit")
-
-
- def is_model_downloaded(logging_name, download_log):
-     if not os.path.exists(download_log):
-         return False
-     with open(download_log, "r") as f:
-         for line in f:
-             if line.strip() == logging_name:
-                 return True
-     return False
-
-
- def log_downloaded_model(logging_name, download_log):
-     with open(download_log, "a") as f:
-         f.write(logging_name + "\n")
-
-
- def is_model_created(model_name):
-     result = subprocess.run(["ollama", "list"], stdout=subprocess.PIPE)
-     return model_name in result.stdout.decode("utf-8")
-
-
- def download_model(repo_id, filename, token, cache_dir="downloads"):
-     """
-     Downloads a model file from the Hugging Face Hub using hf_hub_download.
-     """
-     try:
-         os.makedirs(cache_dir, exist_ok=True)
-
-         # Download using hf_hub_download
-         filepath = hf_hub_download(
-             repo_id=repo_id,
-             filename=filename,
-             token=token,
-             cache_dir=cache_dir,
-             resume_download=True,
-             force_download=False,
-             local_files_only=False
-         )
-
-         # Ensure file is in the expected location
-         expected_path = os.path.join(cache_dir, filename)
-         if filepath != expected_path:
-             os.makedirs(os.path.dirname(expected_path), exist_ok=True)
-             if not os.path.exists(expected_path):
-                 import shutil
-                 shutil.copy2(filepath, expected_path)
-             filepath = expected_path
-
-         return filepath
-
-     except Exception as e:
-         logger.error(f"Error downloading model: {str(e)}")
-         raise
-
-
- def is_ollama_running():
-     for proc in psutil.process_iter(["name"]):
-         if proc.info["name"] in ["ollama", "ollama.exe"]:
-             return True
-     return False
-
-
- def main(model_path=None, gguf_file=None):
-     show_art()
-
-     parser = argparse.ArgumentParser(description="Download and create an Ollama model")
-     parser.add_argument("-m", "--model_path", help="Path to the model on Hugging Face Hub")
-     parser.add_argument("-g", "--gguf_file", help="Name of the GGUF file")
-     args = parser.parse_args()
-
-     model_path = args.model_path if args.model_path else model_path
-     gguf_file = args.gguf_file if args.gguf_file else gguf_file
-
-     if not model_path or not gguf_file:
-         logger.error("Error: model_path and gguf_file are required.")
-         usage()
-         sys.exit(2)
-
-     model_name = gguf_file.split(".Q4")[0]
-     download_log = "downloaded_models.log"
-     logging_name = f"{model_path}_{model_name}"
-
-     if not os.path.exists(download_log):
-         with open(download_log, 'w') as f:
-             pass
-
-     try:
-         subprocess.check_output(['pip', 'show', 'huggingface-hub'])
-     except subprocess.CalledProcessError:
-         logger.info("Installing huggingface-hub...")
-         subprocess.check_call(['pip', 'install', '-U', 'huggingface_hub[cli]'])
-     else:
-         logger.info("huggingface-hub is already installed.")
-
-     if is_model_downloaded(logging_name, download_log):
-         logger.info(f"Model {logging_name} has already been downloaded. Skipping download.")
-     else:
-         logger.info(f"Downloading model {logging_name}...")
-         token = os.getenv('HUGGINGFACE_TOKEN', None)
-         if not token:
-             logger.warning("Warning: HUGGINGFACE_TOKEN environment variable is not set. Using None.")
-
-         filepath = download_model(model_path, gguf_file, token)
-         log_downloaded_model(logging_name, download_log)
-         logger.info(f"Model {logging_name} downloaded and logged.")
-
-     try:
-         subprocess.check_output(['ollama', '--version'])
-     except subprocess.CalledProcessError:
-         logger.info("Installing Ollama...")
-         subprocess.check_call(['curl', '-fsSL', 'https://ollama.com/install.sh', '|', 'sh'])
-     else:
-         logger.info("Ollama is already installed.")
-
-     if is_ollama_running():
-         logger.info("Ollama is already running. Skipping the start.")
-     else:
-         logger.info("Starting Ollama...")
-         subprocess.Popen(['ollama', 'serve'])
-
-         while not is_ollama_running():
-             logger.info("Waiting for Ollama to start...")
-             time.sleep(1)
-
-         logger.info("Ollama has started.")
-
-     if is_model_created(model_name):
-         logger.info(f"Model {model_name} is already created. Skipping creation.")
-     else:
-         logger.info(f"Creating model {model_name}...")
-         with open('Modelfile', 'w') as f:
-             f.write(f"FROM ./downloads/{gguf_file}")
-         subprocess.check_call(['ollama', 'create', model_name, '-f', 'Modelfile'])
-         logger.info(f"Model {model_name} created.")
-
-     logger.info(f"model name is > {model_name}")
-     logger.info(f"Use Ollama run {model_name}")
-
-
- if __name__ == "__main__":
+ """
+ Yo fam! 🔥 Welcome to AutoLlama - your go-to tool for downloading and setting up HelpingAI models! 💪
+
+ Created by the legendary Abhay Koul, this script's got your back when it comes to:
+ - Downloading models straight from HuggingFace Hub 🚀
+ - Setting up Ollama with zero hassle 💯
+ - Getting your AI assistant ready to vibe with you! ⚡
+
+ Usage:
+     >>> python -m webscout.Extra.autollama download -m "OEvortex/HelpingAI-Lite-1.5T" -g "HelpingAI-Lite-1.5T.q4_k_m.gguf"
+
+ Features:
+ - Smart model management 🧠
+ - Automatic dependency installation 📦
+ - Progress tracking that keeps it real 📈
+ - Error handling that's got your back 💪
+
+ Join the squad on Discord and level up your AI game! 🎮
+ """
+
+ import warnings
+ from datetime import time
+ import os
+ import sys
+ import subprocess
+ import psutil
+ from huggingface_hub import hf_hub_download
+ from ..Litlogger import LitLogger, LogFormat, ColorScheme
+ from ..swiftcli import CLI, option
+ # import ollama
+
+ # Suppress specific warnings
+ warnings.filterwarnings(
+     "ignore", category=FutureWarning, module="huggingface_hub.file_download"
+ )
+
+ # Initialize LitLogger with custom format and colors
+ logger = LitLogger(
+     name="AutoLlama",
+     format=LogFormat.MODERN_EMOJI,
+     color_scheme=ColorScheme.OCEAN
+ )
+
+ def show_art():
+     """Dropping that signature HAI love! 💝 Made with passion in India! 🇮🇳"""
+     logger.info("Made with love in India")
+
+ def usage():
+     logger.info("Usage: python script.py -m <model_path> -g <gguf_file>")
+     logger.info("Options:")
+     logger.info(" -m <model_path> Set the path to the model")
+     logger.info(" -g <gguf_file> Set the GGUF file name")
+     logger.info(" -h Display this help and exit")
+
+ def is_model_downloaded(logging_name, download_log):
+     """
+     Checking if we already got that model downloaded! 🔍
+
+     Args:
+         logging_name (str): The model's unique name in our records 📝
+         download_log (str): Where we keep track of our downloads 📋
+
+     Returns:
+         bool: True if we got it, False if we need to grab it! 💯
+     """
+     if not os.path.exists(download_log):
+         return False
+     with open(download_log, "r") as f:
+         for line in f:
+             if line.strip() == logging_name:
+                 return True
+     return False
+
+ def log_downloaded_model(logging_name, download_log):
+     """
+     Keeping track of our downloaded models like a boss! 📝
+
+     Args:
+         logging_name (str): Model's name to remember 🏷️
+         download_log (str): Our download history file 📋
+     """
+     with open(download_log, "a") as f:
+         f.write(logging_name + "\n")
+
+ def is_model_created(model_name):
+     """
+     Checking if the model's already set up in Ollama! 🔍
+
+     Args:
+         model_name (str): Name of the model we're looking for 🎯
+
+     Returns:
+         bool: True if it's ready to roll, False if we need to set it up! 💪
+     """
+     result = subprocess.run(["ollama", "list"], stdout=subprocess.PIPE)
+     return model_name in result.stdout.decode("utf-8")
+
+ def download_model(repo_id, filename, token, cache_dir="downloads"):
+     """
+     Pulling models straight from HuggingFace Hub! 🚀
+
+     Args:
+         repo_id (str): Where to find the model on HF 🎯
+         filename (str): Name of the file we want 📄
+         token (str): Your HF access token (optional but recommended) 🔑
+         cache_dir (str): Where to save the downloads (default: 'downloads') 📂
+
+     Returns:
+         str: Path to your downloaded model file 📍
+
+     Raises:
+         Exception: If something goes wrong, we'll let you know what's up! ⚠️
+     """
+     try:
+         os.makedirs(cache_dir, exist_ok=True)
+
+         # Download using hf_hub_download
+         filepath = hf_hub_download(
+             repo_id=repo_id,
+             filename=filename,
+             token=token,
+             cache_dir=cache_dir,
+             resume_download=True,
+             force_download=False,
+             local_files_only=False
+         )
+
+         # Ensure file is in the expected location
+         expected_path = os.path.join(cache_dir, filename)
+         if filepath != expected_path:
+             os.makedirs(os.path.dirname(expected_path), exist_ok=True)
+             if not os.path.exists(expected_path):
+                 import shutil
+                 shutil.copy2(filepath, expected_path)
+             filepath = expected_path
+
+         return filepath
+
+     except Exception as e:
+         logger.error(f"Error downloading model: {str(e)}")
+         raise
+
+ def is_ollama_running():
+     """
+     Checking if Ollama's up and running! 🏃‍♂️
+
+     Returns:
+         bool: True if Ollama's vibing, False if it needs a kickstart! ⚡
+     """
+     for proc in psutil.process_iter(["name"]):
+         if proc.info["name"] in ["ollama", "ollama.exe"]:
+             return True
+     return False
+
+ # Initialize CLI
+ app = CLI(
+     name="autollama",
+     help="Download and create Ollama models",
+     version="1.0.0"
+ )
+
+ @app.command(name="download")
+ @option("-m", "--model-path", help="Path to the model on Hugging Face Hub", required=True)
+ @option("-g", "--gguf-file", help="Name of the GGUF file", required=True)
+ def download_command(model_path: str, gguf_file: str):
+     """
+     Your one-stop command to download and set up HelpingAI models! 🚀
+
+     Args:
+         model_path (str): Where to find your model on HuggingFace Hub 🎯
+         gguf_file (str): The GGUF file you want to download 📄
+
+     Example:
+         >>> python -m webscout.Extra.autollama download \\
+         ...     -m "OEvortex/HelpingAI-Lite-1.5T" \\
+         ...     -g "HelpingAI-Lite-1.5T.q4_k_m.gguf"
+     """
+     show_art()
+
+     model_name = gguf_file.split(".Q4")[0]
+     download_log = "downloaded_models.log"
+     logging_name = f"{model_path}_{model_name}"
+
+     if not os.path.exists(download_log):
+         with open(download_log, 'w') as f:
+             pass
+
+     try:
+         subprocess.check_output(['pip', 'show', 'huggingface-hub'])
+     except subprocess.CalledProcessError:
+         logger.info("Installing huggingface-hub...")
+         subprocess.check_call(['pip', 'install', '-U', 'huggingface_hub[cli]'])
+     else:
+         logger.info("huggingface-hub is already installed.")
+
+     if is_model_downloaded(logging_name, download_log):
+         logger.info(f"Model {logging_name} has already been downloaded. Skipping download.")
+     else:
+         logger.info(f"Downloading model {logging_name}...")
+         token = os.getenv('HUGGINGFACE_TOKEN', None)
+         if not token:
+             logger.warning("Warning: HUGGINGFACE_TOKEN environment variable is not set. Using None.")
+
+         filepath = download_model(model_path, gguf_file, token)
+         log_downloaded_model(logging_name, download_log)
+         logger.info(f"Model {logging_name} downloaded and logged.")
+
+     try:
+         subprocess.check_output(['ollama', '--version'])
+     except subprocess.CalledProcessError:
+         logger.info("Installing Ollama...")
+         subprocess.check_call(['curl', '-fsSL', 'https://ollama.com/install.sh', '|', 'sh'])
+     else:
+         logger.info("Ollama is already installed.")
+
+     if is_ollama_running():
+         logger.info("Ollama is already running. Skipping the start.")
+     else:
+         logger.info("Starting Ollama...")
+         subprocess.Popen(['ollama', 'serve'])
+
+         while not is_ollama_running():
+             logger.info("Waiting for Ollama to start...")
+             time.sleep(1)
+
+         logger.info("Ollama has started.")
+
+     if is_model_created(model_name):
+         logger.info(f"Model {model_name} is already created. Skipping creation.")
+     else:
+         logger.info(f"Creating model {model_name}...")
+         with open('Modelfile', 'w') as f:
+             f.write(f"FROM ./downloads/{gguf_file}")
+         subprocess.check_call(['ollama', 'create', model_name, '-f', 'Modelfile'])
+         logger.info(f"Model {model_name} created.")
+
+     logger.success(f"model name is > {model_name}")
+     logger.info(f"Use Ollama run {model_name}")
+
+ def main():
+     """
+     Main function to run the AutoLlama CLI.
+     """
+     app.run()
+
+ if __name__ == "__main__":
  main()
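
A note on the code above, carried over unchanged from 6.2b0 to 6.4: the module imports `time` from `datetime`, but the startup loop calls `time.sleep(1)`. `datetime.time` is a class with no `sleep` attribute, so the wait loop raises `AttributeError` whenever Ollama is not already running. A minimal sketch of the presumably intended loop, using the stdlib `time` module and the `is_ollama_running` helper as defined in the diff:

import time  # stdlib module; `from datetime import time` shadows this with a class

import psutil


def is_ollama_running() -> bool:
    # Same check as in the diff: scan running processes for an ollama binary.
    for proc in psutil.process_iter(["name"]):
        if proc.info["name"] in ("ollama", "ollama.exe"):
            return True
    return False


# Poll once per second until the server process shows up.
while not is_ollama_running():
    print("Waiting for Ollama to start...")
    time.sleep(1)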
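
Also unchanged across both versions: the Ollama installer step passes the pipe as a literal argv element, `['curl', '-fsSL', 'https://ollama.com/install.sh', '|', 'sh']`. Without a shell, curl receives `|` and `sh` as extra URL arguments and no pipe is created, so the install script is never executed. A hedged sketch of one way to get the intended pipe (running a remote install script like this implies trusting that URL):

import subprocess

# The pipe must be interpreted by a shell, so pass a single command string
# with shell=True rather than an argv list containing a literal '|'.
subprocess.check_call("curl -fsSL https://ollama.com/install.sh | sh", shell=True)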
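
Finally, `model_name = gguf_file.split(".Q4")[0]` only strips uppercase `.Q4*` suffixes; with the lowercase filename from the module's own usage example (`HelpingAI-Lite-1.5T.q4_k_m.gguf`), the split never matches and the full filename, `.gguf` included, becomes the model name. A possible case-insensitive alternative — `derive_model_name` is a hypothetical helper sketched here, not part of the package:

import os
import re


def derive_model_name(gguf_file: str) -> str:
    # Drop the '.gguf' extension, then any trailing quantization suffix
    # such as '.q4_k_m' or '.Q5_0', regardless of case.
    stem = os.path.splitext(gguf_file)[0]
    return re.sub(r"\.[qQ]\d.*$", "", stem)


assert derive_model_name("HelpingAI-Lite-1.5T.q4_k_m.gguf") == "HelpingAI-Lite-1.5T"
assert derive_model_name("HelpingAI-Lite-1.5T.Q4_K_M.gguf") == "HelpingAI-Lite-1.5T"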