webscout 6.3-py3-none-any.whl → 6.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (131)
  1. webscout/AIauto.py +191 -176
  2. webscout/AIbase.py +0 -197
  3. webscout/AIutel.py +441 -1130
  4. webscout/DWEBS.py +189 -35
  5. webscout/{YTdownloader.py → Extra/YTToolkit/YTdownloader.py} +990 -1103
  6. webscout/Extra/YTToolkit/__init__.py +3 -0
  7. webscout/{transcriber.py → Extra/YTToolkit/transcriber.py} +479 -551
  8. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
  9. webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
  10. webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
  11. webscout/Extra/YTToolkit/ytapi/extras.py +45 -0
  12. webscout/Extra/YTToolkit/ytapi/https.py +88 -0
  13. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
  14. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
  15. webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
  16. webscout/Extra/YTToolkit/ytapi/query.py +37 -0
  17. webscout/Extra/YTToolkit/ytapi/stream.py +60 -0
  18. webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
  19. webscout/Extra/YTToolkit/ytapi/video.py +102 -0
  20. webscout/Extra/__init__.py +3 -1
  21. webscout/Extra/autocoder/__init__.py +9 -0
  22. webscout/Extra/autocoder/autocoder_utiles.py +121 -0
  23. webscout/Extra/autocoder/rawdog.py +680 -0
  24. webscout/Extra/autollama.py +246 -195
  25. webscout/Extra/gguf.py +81 -56
  26. webscout/Extra/markdownlite/__init__.py +862 -0
  27. webscout/Extra/weather_ascii.py +2 -2
  28. webscout/LLM.py +206 -43
  29. webscout/Litlogger/__init__.py +681 -0
  30. webscout/Provider/DARKAI.py +1 -1
  31. webscout/Provider/EDITEE.py +1 -1
  32. webscout/Provider/NinjaChat.py +1 -1
  33. webscout/Provider/PI.py +120 -35
  34. webscout/Provider/Perplexity.py +590 -598
  35. webscout/Provider/Reka.py +0 -1
  36. webscout/Provider/RoboCoders.py +206 -0
  37. webscout/Provider/TTI/AiForce/__init__.py +22 -0
  38. webscout/Provider/TTI/AiForce/async_aiforce.py +257 -0
  39. webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -0
  40. webscout/Provider/TTI/Nexra/__init__.py +22 -0
  41. webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
  42. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
  43. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
  44. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -0
  45. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -0
  46. webscout/Provider/TTI/__init__.py +2 -4
  47. webscout/Provider/TTI/artbit/__init__.py +22 -0
  48. webscout/Provider/TTI/artbit/async_artbit.py +184 -0
  49. webscout/Provider/TTI/artbit/sync_artbit.py +176 -0
  50. webscout/Provider/TTI/blackbox/__init__.py +4 -0
  51. webscout/Provider/TTI/blackbox/async_blackbox.py +212 -0
  52. webscout/Provider/TTI/{blackboximage.py → blackbox/sync_blackbox.py} +199 -153
  53. webscout/Provider/TTI/deepinfra/__init__.py +4 -0
  54. webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -0
  55. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -0
  56. webscout/Provider/TTI/huggingface/__init__.py +22 -0
  57. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
  58. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
  59. webscout/Provider/TTI/imgninza/__init__.py +4 -0
  60. webscout/Provider/TTI/imgninza/async_ninza.py +214 -0
  61. webscout/Provider/TTI/{imgninza.py → imgninza/sync_ninza.py} +209 -136
  62. webscout/Provider/TTI/talkai/__init__.py +4 -0
  63. webscout/Provider/TTI/talkai/async_talkai.py +229 -0
  64. webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
  65. webscout/Provider/TTS/__init__.py +5 -1
  66. webscout/Provider/TTS/deepgram.py +183 -0
  67. webscout/Provider/TTS/elevenlabs.py +137 -0
  68. webscout/Provider/TTS/gesserit.py +151 -0
  69. webscout/Provider/TTS/murfai.py +139 -0
  70. webscout/Provider/TTS/parler.py +134 -107
  71. webscout/Provider/TTS/streamElements.py +360 -275
  72. webscout/Provider/TTS/utils.py +280 -0
  73. webscout/Provider/TTS/voicepod.py +116 -116
  74. webscout/Provider/__init__.py +8 -1
  75. webscout/Provider/askmyai.py +2 -2
  76. webscout/Provider/cerebras.py +227 -219
  77. webscout/Provider/llama3mitril.py +0 -1
  78. webscout/Provider/meta.py +794 -779
  79. webscout/Provider/mhystical.py +176 -0
  80. webscout/Provider/perplexitylabs.py +265 -0
  81. webscout/Provider/twitterclone.py +251 -245
  82. webscout/Provider/typegpt.py +358 -0
  83. webscout/__init__.py +9 -8
  84. webscout/__main__.py +5 -5
  85. webscout/cli.py +252 -280
  86. webscout/conversation.py +227 -0
  87. webscout/exceptions.py +161 -29
  88. webscout/litagent/__init__.py +172 -0
  89. webscout/litprinter/__init__.py +832 -0
  90. webscout/optimizers.py +270 -0
  91. webscout/prompt_manager.py +279 -0
  92. webscout/scout/__init__.py +11 -0
  93. webscout/scout/core.py +884 -0
  94. webscout/scout/element.py +459 -0
  95. webscout/scout/parsers/__init__.py +69 -0
  96. webscout/scout/parsers/html5lib_parser.py +172 -0
  97. webscout/scout/parsers/html_parser.py +236 -0
  98. webscout/scout/parsers/lxml_parser.py +178 -0
  99. webscout/scout/utils.py +38 -0
  100. webscout/swiftcli/__init__.py +810 -0
  101. webscout/update_checker.py +125 -0
  102. webscout/version.py +1 -1
  103. webscout/zeroart/__init__.py +55 -0
  104. webscout/zeroart/base.py +61 -0
  105. webscout/zeroart/effects.py +99 -0
  106. webscout/zeroart/fonts.py +816 -0
  107. webscout/zerodir/__init__.py +225 -0
  108. {webscout-6.3.dist-info → webscout-6.5.dist-info}/METADATA +37 -112
  109. webscout-6.5.dist-info/RECORD +179 -0
  110. webscout/Agents/Onlinesearcher.py +0 -182
  111. webscout/Agents/__init__.py +0 -2
  112. webscout/Agents/functioncall.py +0 -248
  113. webscout/Bing_search.py +0 -154
  114. webscout/Provider/TTI/AIuncensoredimage.py +0 -103
  115. webscout/Provider/TTI/Nexra.py +0 -120
  116. webscout/Provider/TTI/PollinationsAI.py +0 -138
  117. webscout/Provider/TTI/WebSimAI.py +0 -142
  118. webscout/Provider/TTI/aiforce.py +0 -160
  119. webscout/Provider/TTI/artbit.py +0 -141
  120. webscout/Provider/TTI/deepinfra.py +0 -148
  121. webscout/Provider/TTI/huggingface.py +0 -155
  122. webscout/Provider/TTI/talkai.py +0 -116
  123. webscout/g4f.py +0 -666
  124. webscout/models.py +0 -23
  125. webscout/requestsHTMLfix.py +0 -775
  126. webscout/webai.py +0 -2590
  127. webscout-6.3.dist-info/RECORD +0 -124
  128. {webscout-6.3.dist-info → webscout-6.5.dist-info}/LICENSE.md +0 -0
  129. {webscout-6.3.dist-info → webscout-6.5.dist-info}/WHEEL +0 -0
  130. {webscout-6.3.dist-info → webscout-6.5.dist-info}/entry_points.txt +0 -0
  131. {webscout-6.3.dist-info → webscout-6.5.dist-info}/top_level.txt +0 -0
webscout/Extra/autollama.py CHANGED
@@ -1,196 +1,247 @@
- import warnings
- from datetime import time
- import os
- import sys
- import subprocess
- import logging
- import psutil
- from huggingface_hub import hf_hub_download  # Updated import
- import colorlog
- import ollama
- import argparse
-
- # Suppress specific warnings
- warnings.filterwarnings(
-     "ignore", category=FutureWarning, module="huggingface_hub.file_download"
- )
-
- # Configure logging with colors
- handler = colorlog.StreamHandler()
- handler.setFormatter(
-     colorlog.ColoredFormatter(
-         "%(log_color)s%(asctime)s - %(levelname)s - %(message)s",
-         datefmt="%Y-%m-%d %H:%M:%S",
-         log_colors={
-             "DEBUG": "cyan",
-             "INFO": "green",
-             "WARNING": "yellow",
-             "ERROR": "red",
-             "CRITICAL": "red,bg_white",
-         },
-     )
- )
-
- logger = colorlog.getLogger(__name__)
- if not logger.hasHandlers():
-     logger.addHandler(handler)
- logger.setLevel(logging.INFO)
-
- logging.captureWarnings(True)
- py_warnings_logger = logging.getLogger("py.warnings")
- if not py_warnings_logger.hasHandlers():
-     py_warnings_logger.addHandler(handler)
-
-
- def show_art():
-     logger.info("Made with love in India")
-
-
- def usage():
-     logger.info("Usage: python script.py -m <model_path> -g <gguf_file>")
-     logger.info("Options:")
-     logger.info("  -m <model_path>  Set the path to the model")
-     logger.info("  -g <gguf_file>   Set the GGUF file name")
-     logger.info("  -h               Display this help and exit")
-
-
- def is_model_downloaded(logging_name, download_log):
-     if not os.path.exists(download_log):
-         return False
-     with open(download_log, "r") as f:
-         for line in f:
-             if line.strip() == logging_name:
-                 return True
-     return False
-
-
- def log_downloaded_model(logging_name, download_log):
-     with open(download_log, "a") as f:
-         f.write(logging_name + "\n")
-
-
- def is_model_created(model_name):
-     result = subprocess.run(["ollama", "list"], stdout=subprocess.PIPE)
-     return model_name in result.stdout.decode("utf-8")
-
-
- def download_model(repo_id, filename, token, cache_dir="downloads"):
-     """
-     Downloads a model file from the Hugging Face Hub using hf_hub_download.
-     """
-     try:
-         os.makedirs(cache_dir, exist_ok=True)
-
-         # Download using hf_hub_download
-         filepath = hf_hub_download(
-             repo_id=repo_id,
-             filename=filename,
-             token=token,
-             cache_dir=cache_dir,
-             resume_download=True,
-             force_download=False,
-             local_files_only=False
-         )
-
-         # Ensure file is in the expected location
-         expected_path = os.path.join(cache_dir, filename)
-         if filepath != expected_path:
-             os.makedirs(os.path.dirname(expected_path), exist_ok=True)
-             if not os.path.exists(expected_path):
-                 import shutil
-                 shutil.copy2(filepath, expected_path)
-             filepath = expected_path
-
-         return filepath
-
-     except Exception as e:
-         logger.error(f"Error downloading model: {str(e)}")
-         raise
-
-
- def is_ollama_running():
-     for proc in psutil.process_iter(["name"]):
-         if proc.info["name"] in ["ollama", "ollama.exe"]:
-             return True
-     return False
-
-
- def main(model_path=None, gguf_file=None):
-     show_art()
-
-     parser = argparse.ArgumentParser(description="Download and create an Ollama model")
-     parser.add_argument("-m", "--model_path", help="Path to the model on Hugging Face Hub")
-     parser.add_argument("-g", "--gguf_file", help="Name of the GGUF file")
-     args = parser.parse_args()
-
-     model_path = args.model_path if args.model_path else model_path
-     gguf_file = args.gguf_file if args.gguf_file else gguf_file
-
-     if not model_path or not gguf_file:
-         logger.error("Error: model_path and gguf_file are required.")
-         usage()
-         sys.exit(2)
-
-     model_name = gguf_file.split(".Q4")[0]
-     download_log = "downloaded_models.log"
-     logging_name = f"{model_path}_{model_name}"
-
-     if not os.path.exists(download_log):
-         with open(download_log, 'w') as f:
-             pass
-
-     try:
-         subprocess.check_output(['pip', 'show', 'huggingface-hub'])
-     except subprocess.CalledProcessError:
-         logger.info("Installing huggingface-hub...")
-         subprocess.check_call(['pip', 'install', '-U', 'huggingface_hub[cli]'])
-     else:
-         logger.info("huggingface-hub is already installed.")
-
-     if is_model_downloaded(logging_name, download_log):
-         logger.info(f"Model {logging_name} has already been downloaded. Skipping download.")
-     else:
-         logger.info(f"Downloading model {logging_name}...")
-         token = os.getenv('HUGGINGFACE_TOKEN', None)
-         if not token:
-             logger.warning("Warning: HUGGINGFACE_TOKEN environment variable is not set. Using None.")
-
-         filepath = download_model(model_path, gguf_file, token)
-         log_downloaded_model(logging_name, download_log)
-         logger.info(f"Model {logging_name} downloaded and logged.")
-
-     try:
-         subprocess.check_output(['ollama', '--version'])
-     except subprocess.CalledProcessError:
-         logger.info("Installing Ollama...")
-         subprocess.check_call(['curl', '-fsSL', 'https://ollama.com/install.sh', '|', 'sh'])
-     else:
-         logger.info("Ollama is already installed.")
-
-     if is_ollama_running():
-         logger.info("Ollama is already running. Skipping the start.")
-     else:
-         logger.info("Starting Ollama...")
-         subprocess.Popen(['ollama', 'serve'])
-
-         while not is_ollama_running():
-             logger.info("Waiting for Ollama to start...")
-             time.sleep(1)
-
-         logger.info("Ollama has started.")
-
-     if is_model_created(model_name):
-         logger.info(f"Model {model_name} is already created. Skipping creation.")
-     else:
-         logger.info(f"Creating model {model_name}...")
-         with open('Modelfile', 'w') as f:
-             f.write(f"FROM ./downloads/{gguf_file}")
-         subprocess.check_call(['ollama', 'create', model_name, '-f', 'Modelfile'])
-         logger.info(f"Model {model_name} created.")
-
-     logger.info(f"model name is > {model_name}")
-     logger.info(f"Use Ollama run {model_name}")
-
-
- if __name__ == "__main__":
+ """
+ Yo fam! 🔥 Welcome to AutoLlama - your go-to tool for downloading and setting up HelpingAI models! 💪
+
+ Created by the legendary Abhay Koul, this script's got your back when it comes to:
+ - Downloading models straight from HuggingFace Hub 🚀
+ - Setting up Ollama with zero hassle 💯
+ - Getting your AI assistant ready to vibe with you! ⚡
+
+ Usage:
+     >>> python -m webscout.Extra.autollama download -m "OEvortex/HelpingAI-Lite-1.5T" -g "HelpingAI-Lite-1.5T.q4_k_m.gguf"
+
+ Features:
+ - Smart model management 🧠
+ - Automatic dependency installation 📦
+ - Progress tracking that keeps it real 📈
+ - Error handling that's got your back 💪
+
+ Join the squad on Discord and level up your AI game! 🎮
+ """
+
+ import warnings
+ from datetime import time
+ import os
+ import sys
+ import subprocess
+ import psutil
+ from huggingface_hub import hf_hub_download
+ from ..Litlogger import LitLogger, LogFormat, ColorScheme
+ from ..swiftcli import CLI, option
+ # import ollama
+
+ # Suppress specific warnings
+ warnings.filterwarnings(
+     "ignore", category=FutureWarning, module="huggingface_hub.file_download"
+ )
+
+ # Initialize LitLogger with custom format and colors
+ logger = LitLogger(
+     name="AutoLlama",
+     format=LogFormat.MODERN_EMOJI,
+     color_scheme=ColorScheme.OCEAN
+ )
+
+ def show_art():
+     """Dropping that signature HAI love! 💝 Made with passion in India! 🇮🇳"""
+     logger.info("Made with love in India")
+
+ def usage():
+     logger.info("Usage: python script.py -m <model_path> -g <gguf_file>")
+     logger.info("Options:")
+     logger.info("  -m <model_path>  Set the path to the model")
+     logger.info("  -g <gguf_file>   Set the GGUF file name")
+     logger.info("  -h               Display this help and exit")
+
+ def is_model_downloaded(logging_name, download_log):
+     """
+     Checking if we already got that model downloaded! 🔍
+
+     Args:
+         logging_name (str): The model's unique name in our records 📝
+         download_log (str): Where we keep track of our downloads 📋
+
+     Returns:
+         bool: True if we got it, False if we need to grab it! 💯
+     """
+     if not os.path.exists(download_log):
+         return False
+     with open(download_log, "r") as f:
+         for line in f:
+             if line.strip() == logging_name:
+                 return True
+     return False
+
+ def log_downloaded_model(logging_name, download_log):
+     """
+     Keeping track of our downloaded models like a boss! 📝
+
+     Args:
+         logging_name (str): Model's name to remember 🏷️
+         download_log (str): Our download history file 📋
+     """
+     with open(download_log, "a") as f:
+         f.write(logging_name + "\n")
+
+ def is_model_created(model_name):
+     """
+     Checking if the model's already set up in Ollama! 🔍
+
+     Args:
+         model_name (str): Name of the model we're looking for 🎯
+
+     Returns:
+         bool: True if it's ready to roll, False if we need to set it up! 💪
+     """
+     result = subprocess.run(["ollama", "list"], stdout=subprocess.PIPE)
+     return model_name in result.stdout.decode("utf-8")
+
+ def download_model(repo_id, filename, token, cache_dir="downloads"):
+     """
+     Pulling models straight from HuggingFace Hub! 🚀
+
+     Args:
+         repo_id (str): Where to find the model on HF 🎯
+         filename (str): Name of the file we want 📄
+         token (str): Your HF access token (optional but recommended) 🔑
+         cache_dir (str): Where to save the downloads (default: 'downloads') 📂
+
+     Returns:
+         str: Path to your downloaded model file 📍
+
+     Raises:
+         Exception: If something goes wrong, we'll let you know what's up! ⚠️
+     """
+     try:
+         os.makedirs(cache_dir, exist_ok=True)
+
+         # Download using hf_hub_download
+         filepath = hf_hub_download(
+             repo_id=repo_id,
+             filename=filename,
+             token=token,
+             cache_dir=cache_dir,
+             resume_download=True,
+             force_download=False,
+             local_files_only=False
+         )
+
+         # Ensure file is in the expected location
+         expected_path = os.path.join(cache_dir, filename)
+         if filepath != expected_path:
+             os.makedirs(os.path.dirname(expected_path), exist_ok=True)
+             if not os.path.exists(expected_path):
+                 import shutil
+                 shutil.copy2(filepath, expected_path)
+             filepath = expected_path
+
+         return filepath
+
+     except Exception as e:
+         logger.error(f"Error downloading model: {str(e)}")
+         raise
+
+ def is_ollama_running():
+     """
+     Checking if Ollama's up and running! 🏃‍♂️
+
+     Returns:
+         bool: True if Ollama's vibing, False if it needs a kickstart! ⚡
+     """
+     for proc in psutil.process_iter(["name"]):
+         if proc.info["name"] in ["ollama", "ollama.exe"]:
+             return True
+     return False
+
+ # Initialize CLI
+ app = CLI(
+     name="autollama",
+     help="Download and create Ollama models",
+     version="1.0.0"
+ )
+
+ @app.command(name="download")
+ @option("-m", "--model-path", help="Path to the model on Hugging Face Hub", required=True)
+ @option("-g", "--gguf-file", help="Name of the GGUF file", required=True)
+ def download_command(model_path: str, gguf_file: str):
+     """
+     Your one-stop command to download and set up HelpingAI models! 🚀
+
+     Args:
+         model_path (str): Where to find your model on HuggingFace Hub 🎯
+         gguf_file (str): The GGUF file you want to download 📄
+
+     Example:
+         >>> python -m webscout.Extra.autollama download \\
+         ...     -m "OEvortex/HelpingAI-Lite-1.5T" \\
+         ...     -g "HelpingAI-Lite-1.5T.q4_k_m.gguf"
+     """
+     show_art()
+
+     model_name = gguf_file.split(".Q4")[0]
+     download_log = "downloaded_models.log"
+     logging_name = f"{model_path}_{model_name}"
+
+     if not os.path.exists(download_log):
+         with open(download_log, 'w') as f:
+             pass
+
+     try:
+         subprocess.check_output(['pip', 'show', 'huggingface-hub'])
+     except subprocess.CalledProcessError:
+         logger.info("Installing huggingface-hub...")
+         subprocess.check_call(['pip', 'install', '-U', 'huggingface_hub[cli]'])
+     else:
+         logger.info("huggingface-hub is already installed.")
+
+     if is_model_downloaded(logging_name, download_log):
+         logger.info(f"Model {logging_name} has already been downloaded. Skipping download.")
+     else:
+         logger.info(f"Downloading model {logging_name}...")
+         token = os.getenv('HUGGINGFACE_TOKEN', None)
+         if not token:
+             logger.warning("Warning: HUGGINGFACE_TOKEN environment variable is not set. Using None.")
+
+         filepath = download_model(model_path, gguf_file, token)
+         log_downloaded_model(logging_name, download_log)
+         logger.info(f"Model {logging_name} downloaded and logged.")
+
+     try:
+         subprocess.check_output(['ollama', '--version'])
+     except subprocess.CalledProcessError:
+         logger.info("Installing Ollama...")
+         subprocess.check_call(['curl', '-fsSL', 'https://ollama.com/install.sh', '|', 'sh'])
+     else:
+         logger.info("Ollama is already installed.")
+
+     if is_ollama_running():
+         logger.info("Ollama is already running. Skipping the start.")
+     else:
+         logger.info("Starting Ollama...")
+         subprocess.Popen(['ollama', 'serve'])
+
+         while not is_ollama_running():
+             logger.info("Waiting for Ollama to start...")
+             time.sleep(1)
+
+         logger.info("Ollama has started.")
+
+     if is_model_created(model_name):
+         logger.info(f"Model {model_name} is already created. Skipping creation.")
+     else:
+         logger.info(f"Creating model {model_name}...")
+         with open('Modelfile', 'w') as f:
+             f.write(f"FROM ./downloads/{gguf_file}")
+         subprocess.check_call(['ollama', 'create', model_name, '-f', 'Modelfile'])
+         logger.info(f"Model {model_name} created.")
+
+     logger.success(f"model name is > {model_name}")
+     logger.info(f"Use Ollama run {model_name}")
+
+ def main():
+     """
+     Main function to run the AutoLlama CLI.
+     """
+     app.run()
+
+ if __name__ == "__main__":
      main()
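
In short, this change swaps argparse and colorlog for the package's new in-house swiftcli and Litlogger modules (both added in this release, per the file list above) while keeping the download-and-register flow intact. For orientation, here is a condensed sketch of that flow outside the CLI wiring; the helper name fetch_and_register is hypothetical, hf_hub_download is the real huggingface_hub call used in the diff, and the actual module additionally copies the file to downloads/<gguf_file> and records it in downloaded_models.log:

    import subprocess
    from typing import Optional
    from huggingface_hub import hf_hub_download

    def fetch_and_register(repo_id: str, gguf_file: str, token: Optional[str] = None) -> str:
        # Download the GGUF file into the same "downloads" cache dir the module uses.
        filepath = hf_hub_download(
            repo_id=repo_id,
            filename=gguf_file,
            token=token,
            cache_dir="downloads",
        )
        # Same naming rule as download_command: keep everything before ".Q4".
        model_name = gguf_file.split(".Q4")[0]
        # Write a minimal Modelfile and register the model with a running Ollama daemon.
        with open("Modelfile", "w") as f:
            f.write(f"FROM {filepath}")
        subprocess.check_call(["ollama", "create", model_name, "-f", "Modelfile"])
        return model_name
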
webscout/Extra/gguf.py CHANGED
@@ -1,31 +1,49 @@
- # webscout/Extra/gguf.py
+ """
+ Yo fam! 🔥 Welcome to GGUF Converter - your ultimate tool for converting models to GGUF format! 💪
+
+ - Converting HuggingFace models to GGUF format 🚀
+ - Multiple quantization methods for different needs 🎯
+ - Easy upload back to HuggingFace Hub 📤
+
+ Usage:
+     >>> python -m webscout.Extra.gguf convert -m "OEvortex/HelpingAI-Lite-1.5T" -q "q4_k_m,q5_k_m"
+     >>> # With upload options:
+     >>> python -m webscout.Extra.gguf convert -m "your-model" -u "username" -t "token" -q "q4_k_m"
+
+ Features:
+ - Smart dependency checking 🔍
+ - CUDA support detection ⚡
+ - Progress tracking that keeps it real 📈
+ - Multiple quantization options 🎮
+
+ Join the squad on Discord and level up your AI game! 🎮
+ """
+
  import subprocess
  import os
  import sys
- import logging
  import shutil
  from pathlib import Path
  from typing import List, Optional, Dict, Any
- from pyfiglet import figlet_format
+ from webscout.zeroart import figlet_format
  from rich.console import Console
  from rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn, TaskProgressColumn
- from rich.logging import RichHandler
  from rich.panel import Panel
  from rich.table import Table
-
- # Set up logging with Rich
- logging.basicConfig(
-     level=logging.INFO,
-     format="%(message)s",
-     datefmt="[%X]",
-     handlers=[RichHandler(rich_tracebacks=True)]
+ from ..Litlogger import LitLogger, LogFormat, ColorScheme
+ from ..swiftcli import CLI, option
+
+ # Initialize LitLogger with ocean vibes
+ logger = LitLogger(
+     name="GGUFConverter",
+     format=LogFormat.MODERN_EMOJI,
+     color_scheme=ColorScheme.OCEAN
  )

- log = logging.getLogger("rich")
  console = Console()

  class ConversionError(Exception):
-     """Custom exception for conversion errors"""
+     """Custom exception for when things don't go as planned! ⚠️"""
      pass

  class ModelConverter:
@@ -94,16 +112,16 @@ class ModelConverter:

          with console.status("[bold green]Setting up llama.cpp...") as status:
              if not llama_path.exists():
-                 log.info("Cloning llama.cpp repository...")
+                 logger.info("Cloning llama.cpp repository...")
                  subprocess.run(['git', 'clone', 'https://github.com/ggerganov/llama.cpp'], check=True)

              os.chdir(llama_path)
-             log.info("Installing requirements...")
+             logger.info("Installing requirements...")
              subprocess.run(['pip3', 'install', '-r', 'requirements.txt'], check=True)

              has_cuda = subprocess.run(['nvcc', '--version'], capture_output=True).returncode == 0

-             log.info("Building llama.cpp...")
+             logger.info("Building llama.cpp...")
              subprocess.run(['make', 'clean'], check=True)
              if has_cuda:
                  status.update("[bold green]Building with CUDA support...")
@@ -135,7 +153,7 @@ class ModelConverter:
          """Performs the model conversion process."""
          try:
              # Display banner and configuration
-             console.print(f"[bold green]{figlet_format('GGUF Converter')}[/]\n", justify="center")
+             console.print(f"[bold green]{figlet_format('GGUF Converter')}")
              self.display_config()

              # Validate inputs
@@ -186,11 +204,11 @@ class ModelConverter:
                      break
                  if output:
                      progress.update(task, description=output.strip())
-                     log.info(output.strip())
+                     logger.info(output.strip())

              stderr = process.stderr.read()
              if stderr:
-                 log.warning(stderr)
+                 logger.warning(stderr)

              if process.returncode != 0:
                  raise ConversionError(f"Conversion failed with return code {process.returncode}")
@@ -372,45 +390,52 @@ echo "Script completed."
          script_path.write_text(script_content)
          script_path.chmod(0o755)

- def convert(
-     model_id: str,
-     username: Optional[str] = None,
-     token: Optional[str] = None,
-     quantization_methods: str = "q4_k_m,q5_k_m"
- ) -> None:
-     """Converts and quantizes a Hugging Face model to GGUF format.
+ # Initialize CLI with HAI vibes
+ app = CLI(
+     name="gguf",
+     help="Convert HuggingFace models to GGUF format with style! 🔥",
+     version="1.0.0"
+ )

+ @app.command(name="convert")
+ @option("-m", "--model-id", help="The HuggingFace model ID (e.g., 'OEvortex/HelpingAI-Lite-1.5T')", required=True)
+ @option("-u", "--username", help="Your HuggingFace username for uploads", default=None)
+ @option("-t", "--token", help="Your HuggingFace API token for uploads", default=None)
+ @option("-q", "--quantization", help="Comma-separated quantization methods", default="q4_k_m,q5_k_m")
+ def convert_command(model_id: str, username: Optional[str] = None,
+                     token: Optional[str] = None, quantization: str = "q4_k_m,q5_k_m"):
+     """
+     Convert and quantize HuggingFace models to GGUF format! 🚀
+
      Args:
-         model_id (str): The Hugging Face model ID (e.g., 'google/flan-t5-xl').
-         username (str, optional): Your Hugging Face username. Required for uploads.
-         token (str, optional): Your Hugging Face API token. Required for uploads.
-         quantization_methods (str, optional): Comma-separated quantization methods.
-             Defaults to "q4_k_m,q5_k_m".
-
-     Raises:
-         ConversionError: If any step in the conversion process fails.
-         ValueError: If invalid parameters are provided.
+         model_id (str): Your model's HF ID (like 'OEvortex/HelpingAI-Lite-1.5T') 🎯
+         username (str, optional): Your HF username for uploads 👤
+         token (str, optional): Your HF API token 🔑
+         quantization (str): Quantization methods (default: q4_k_m,q5_k_m) 🎮
+
+     Example:
+         >>> python -m webscout.Extra.gguf convert \\
+         ...     -m "OEvortex/HelpingAI-Lite-1.5T" \\
+         ...     -q "q4_k_m,q5_k_m"
      """
-     converter = ModelConverter(model_id, username, token, quantization_methods)
-     converter.convert()
+     try:
+         converter = ModelConverter(
+             model_id=model_id,
+             username=username,
+             token=token,
+             quantization_methods=quantization
+         )
+         converter.convert()
+     except (ConversionError, ValueError) as e:
+         logger.error(f"Conversion failed: {str(e)}")
+         sys.exit(1)
+     except Exception as e:
+         logger.error(f"Unexpected error: {str(e)}")
+         sys.exit(1)
+
+ def main():
+     """Fire up the GGUF converter! 🚀"""
+     app.run()

  if __name__ == "__main__":
-     import argparse
-
-     parser = argparse.ArgumentParser(description="Convert Hugging Face models to GGUF format")
-     parser.add_argument("model_id", help="The Hugging Face model ID (e.g., 'google/flan-t5-xl')")
-     parser.add_argument("-u", "--username", help="Your Hugging Face username")
-     parser.add_argument("-t", "--token", help="Your Hugging Face API token")
-     parser.add_argument(
-         "-q", "--quantization-methods",
-         default="q4_k_m,q5_k_m",
-         help="Comma-separated quantization methods"
-     )
-
-     args = parser.parse_args()
-     convert(
-         model_id=args.model_id,
-         username=args.username,
-         token=args.token,
-         quantization_methods=args.quantization_methods
-     )
+     main()
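
As with autollama, the argparse entry point becomes a swiftcli command, here wrapping the existing ModelConverter class. Since the old module-level convert() helper is removed, programmatic callers would now construct the converter directly; a minimal sketch based only on the constructor call visible in convert_command above (the model ID is the example from the module docstring, and leaving username/token as None means no upload):

    from webscout.Extra.gguf import ModelConverter, ConversionError

    try:
        converter = ModelConverter(
            model_id="OEvortex/HelpingAI-Lite-1.5T",  # example ID from the docstring
            username=None,   # set together with token to upload results to the Hub
            token=None,
            quantization_methods="q4_k_m,q5_k_m",  # comma-separated quant methods
        )
        converter.convert()
    except (ConversionError, ValueError) as err:
        print(f"Conversion failed: {err}")
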