webscout-7.0-py3-none-any.whl → webscout-7.2-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of webscout has been flagged as possibly problematic.

Files changed (147):
  1. webscout/AIauto.py +191 -191
  2. webscout/AIbase.py +122 -122
  3. webscout/AIutel.py +440 -440
  4. webscout/Bard.py +343 -161
  5. webscout/DWEBS.py +489 -492
  6. webscout/Extra/YTToolkit/YTdownloader.py +995 -995
  7. webscout/Extra/YTToolkit/__init__.py +2 -2
  8. webscout/Extra/YTToolkit/transcriber.py +476 -479
  9. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  10. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  11. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  12. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  13. webscout/Extra/YTToolkit/ytapi/video.py +103 -103
  14. webscout/Extra/autocoder/__init__.py +9 -9
  15. webscout/Extra/autocoder/autocoder_utiles.py +199 -199
  16. webscout/Extra/autocoder/rawdog.py +5 -7
  17. webscout/Extra/autollama.py +230 -230
  18. webscout/Extra/gguf.py +3 -3
  19. webscout/Extra/weather.py +171 -171
  20. webscout/LLM.py +442 -442
  21. webscout/Litlogger/__init__.py +67 -681
  22. webscout/Litlogger/core/__init__.py +6 -0
  23. webscout/Litlogger/core/level.py +20 -0
  24. webscout/Litlogger/core/logger.py +123 -0
  25. webscout/Litlogger/handlers/__init__.py +12 -0
  26. webscout/Litlogger/handlers/console.py +50 -0
  27. webscout/Litlogger/handlers/file.py +143 -0
  28. webscout/Litlogger/handlers/network.py +174 -0
  29. webscout/Litlogger/styles/__init__.py +7 -0
  30. webscout/Litlogger/styles/colors.py +231 -0
  31. webscout/Litlogger/styles/formats.py +377 -0
  32. webscout/Litlogger/styles/text.py +87 -0
  33. webscout/Litlogger/utils/__init__.py +6 -0
  34. webscout/Litlogger/utils/detectors.py +154 -0
  35. webscout/Litlogger/utils/formatters.py +200 -0
  36. webscout/Provider/AISEARCH/DeepFind.py +250 -250
  37. webscout/Provider/Blackboxai.py +136 -137
  38. webscout/Provider/ChatGPTGratis.py +226 -0
  39. webscout/Provider/Cloudflare.py +91 -78
  40. webscout/Provider/DeepSeek.py +218 -0
  41. webscout/Provider/Deepinfra.py +59 -35
  42. webscout/Provider/Free2GPT.py +131 -124
  43. webscout/Provider/Gemini.py +100 -115
  44. webscout/Provider/Glider.py +74 -59
  45. webscout/Provider/Groq.py +30 -18
  46. webscout/Provider/Jadve.py +108 -77
  47. webscout/Provider/Llama3.py +117 -94
  48. webscout/Provider/Marcus.py +191 -137
  49. webscout/Provider/Netwrck.py +62 -50
  50. webscout/Provider/PI.py +79 -124
  51. webscout/Provider/PizzaGPT.py +129 -83
  52. webscout/Provider/QwenLM.py +311 -0
  53. webscout/Provider/TTI/AiForce/__init__.py +22 -22
  54. webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
  55. webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
  56. webscout/Provider/TTI/Nexra/__init__.py +22 -22
  57. webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
  58. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
  59. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
  60. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
  61. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
  62. webscout/Provider/TTI/artbit/__init__.py +22 -22
  63. webscout/Provider/TTI/artbit/async_artbit.py +184 -184
  64. webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
  65. webscout/Provider/TTI/blackbox/__init__.py +4 -4
  66. webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
  67. webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
  68. webscout/Provider/TTI/deepinfra/__init__.py +4 -4
  69. webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
  70. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
  71. webscout/Provider/TTI/huggingface/__init__.py +22 -22
  72. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
  73. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
  74. webscout/Provider/TTI/imgninza/__init__.py +4 -4
  75. webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
  76. webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
  77. webscout/Provider/TTI/talkai/__init__.py +4 -4
  78. webscout/Provider/TTI/talkai/async_talkai.py +229 -229
  79. webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
  80. webscout/Provider/TTS/deepgram.py +182 -182
  81. webscout/Provider/TTS/elevenlabs.py +136 -136
  82. webscout/Provider/TTS/gesserit.py +150 -150
  83. webscout/Provider/TTS/murfai.py +138 -138
  84. webscout/Provider/TTS/parler.py +133 -134
  85. webscout/Provider/TTS/streamElements.py +360 -360
  86. webscout/Provider/TTS/utils.py +280 -280
  87. webscout/Provider/TTS/voicepod.py +116 -116
  88. webscout/Provider/TextPollinationsAI.py +74 -47
  89. webscout/Provider/WiseCat.py +193 -0
  90. webscout/Provider/__init__.py +144 -136
  91. webscout/Provider/cerebras.py +242 -227
  92. webscout/Provider/chatglm.py +204 -204
  93. webscout/Provider/dgaf.py +67 -39
  94. webscout/Provider/gaurish.py +105 -66
  95. webscout/Provider/geminiapi.py +208 -208
  96. webscout/Provider/granite.py +223 -0
  97. webscout/Provider/hermes.py +218 -218
  98. webscout/Provider/llama3mitril.py +179 -179
  99. webscout/Provider/llamatutor.py +72 -62
  100. webscout/Provider/llmchat.py +60 -35
  101. webscout/Provider/meta.py +794 -794
  102. webscout/Provider/multichat.py +331 -230
  103. webscout/Provider/typegpt.py +359 -356
  104. webscout/Provider/yep.py +5 -5
  105. webscout/__main__.py +5 -5
  106. webscout/cli.py +319 -319
  107. webscout/conversation.py +241 -242
  108. webscout/exceptions.py +328 -328
  109. webscout/litagent/__init__.py +28 -28
  110. webscout/litagent/agent.py +2 -3
  111. webscout/litprinter/__init__.py +0 -58
  112. webscout/scout/__init__.py +8 -8
  113. webscout/scout/core.py +884 -884
  114. webscout/scout/element.py +459 -459
  115. webscout/scout/parsers/__init__.py +69 -69
  116. webscout/scout/parsers/html5lib_parser.py +172 -172
  117. webscout/scout/parsers/html_parser.py +236 -236
  118. webscout/scout/parsers/lxml_parser.py +178 -178
  119. webscout/scout/utils.py +38 -38
  120. webscout/swiftcli/__init__.py +811 -811
  121. webscout/update_checker.py +2 -12
  122. webscout/version.py +1 -1
  123. webscout/webscout_search.py +1142 -1140
  124. webscout/webscout_search_async.py +635 -635
  125. webscout/zeroart/__init__.py +54 -54
  126. webscout/zeroart/base.py +60 -60
  127. webscout/zeroart/effects.py +99 -99
  128. webscout/zeroart/fonts.py +816 -816
  129. {webscout-7.0.dist-info → webscout-7.2.dist-info}/METADATA +21 -28
  130. webscout-7.2.dist-info/RECORD +217 -0
  131. webstoken/__init__.py +30 -30
  132. webstoken/classifier.py +189 -189
  133. webstoken/keywords.py +216 -216
  134. webstoken/language.py +128 -128
  135. webstoken/ner.py +164 -164
  136. webstoken/normalizer.py +35 -35
  137. webstoken/processor.py +77 -77
  138. webstoken/sentiment.py +206 -206
  139. webstoken/stemmer.py +73 -73
  140. webstoken/tagger.py +60 -60
  141. webstoken/tokenizer.py +158 -158
  142. webscout/Provider/RUBIKSAI.py +0 -272
  143. webscout-7.0.dist-info/RECORD +0 -199
  144. {webscout-7.0.dist-info → webscout-7.2.dist-info}/LICENSE.md +0 -0
  145. {webscout-7.0.dist-info → webscout-7.2.dist-info}/WHEEL +0 -0
  146. {webscout-7.0.dist-info → webscout-7.2.dist-info}/entry_points.txt +0 -0
  147. {webscout-7.0.dist-info → webscout-7.2.dist-info}/top_level.txt +0 -0
webscout/Extra/autollama.py CHANGED
@@ -1,231 +1,231 @@
 """
 >>> python -m webscout.Extra.autollama download -m "OEvortex/HelpingAI-Lite-1.5T" -g "HelpingAI-Lite-1.5T.q4_k_m.gguf"
 """

 import warnings
 from datetime import time
 import os
 import sys
 import subprocess
 import psutil
 from huggingface_hub import hf_hub_download
-from ..Litlogger import LitLogger, LogFormat, ColorScheme
+from ..Litlogger import Logger, LogFormat
 from ..swiftcli import CLI, option
 # import ollama

 # Suppress specific warnings
 warnings.filterwarnings(
     "ignore", category=FutureWarning, module="huggingface_hub.file_download"
 )

 # Initialize LitLogger with custom format and colors
-logger = LitLogger(
+logger = Logger(
     name="AutoLlama",
     format=LogFormat.MODERN_EMOJI,
-    color_scheme=ColorScheme.OCEAN
+
 )

 def show_art():
     """Dropping that signature HAI love! 💝 Made with passion in India! 🇮🇳"""
     logger.info("Made with love in India")

 def usage():
     logger.info("Usage: python script.py -m <model_path> -g <gguf_file>")
     logger.info("Options:")
     logger.info("  -m <model_path>  Set the path to the model")
     logger.info("  -g <gguf_file>   Set the GGUF file name")
     logger.info("  -h               Display this help and exit")

 def is_model_downloaded(logging_name, download_log):
     """
     Checking if we already got that model downloaded! 🔍

     Args:
         logging_name (str): The model's unique name in our records 📝
         download_log (str): Where we keep track of our downloads 📋

     Returns:
         bool: True if we got it, False if we need to grab it! 💯
     """
     if not os.path.exists(download_log):
         return False
     with open(download_log, "r") as f:
         for line in f:
             if line.strip() == logging_name:
                 return True
     return False

 def log_downloaded_model(logging_name, download_log):
     """
     Keeping track of our downloaded models like a boss! 📝

     Args:
         logging_name (str): Model's name to remember 🏷️
         download_log (str): Our download history file 📋
     """
     with open(download_log, "a") as f:
         f.write(logging_name + "\n")

 def is_model_created(model_name):
     """
     Checking if the model's already set up in Ollama! 🔍

     Args:
         model_name (str): Name of the model we're looking for 🎯

     Returns:
         bool: True if it's ready to roll, False if we need to set it up! 💪
     """
     result = subprocess.run(["ollama", "list"], stdout=subprocess.PIPE)
     return model_name in result.stdout.decode("utf-8")

 def download_model(repo_id, filename, token, cache_dir="downloads"):
     """
     Pulling models straight from HuggingFace Hub! 🚀

     Args:
         repo_id (str): Where to find the model on HF 🎯
         filename (str): Name of the file we want 📄
         token (str): Your HF access token (optional but recommended) 🔑
         cache_dir (str): Where to save the downloads (default: 'downloads') 📂

     Returns:
         str: Path to your downloaded model file 📍

     Raises:
         Exception: If something goes wrong, we'll let you know what's up! ⚠️
     """
     try:
         os.makedirs(cache_dir, exist_ok=True)

         # Download using hf_hub_download
         filepath = hf_hub_download(
             repo_id=repo_id,
             filename=filename,
             token=token,
             cache_dir=cache_dir,
             resume_download=True,
             force_download=False,
             local_files_only=False
         )

         # Ensure file is in the expected location
         expected_path = os.path.join(cache_dir, filename)
         if filepath != expected_path:
             os.makedirs(os.path.dirname(expected_path), exist_ok=True)
             if not os.path.exists(expected_path):
                 import shutil
                 shutil.copy2(filepath, expected_path)
             filepath = expected_path

         return filepath

     except Exception as e:
         logger.error(f"Error downloading model: {str(e)}")
         raise

 def is_ollama_running():
     """
     Checking if Ollama's up and running! 🏃‍♂️

     Returns:
         bool: True if Ollama's vibing, False if it needs a kickstart! ⚡
     """
     for proc in psutil.process_iter(["name"]):
         if proc.info["name"] in ["ollama", "ollama.exe"]:
             return True
     return False

 # Initialize CLI
 app = CLI(
     name="autollama",
     help="Download and create Ollama models",
     version="1.0.0"
 )

 @app.command(name="download")
 @option("-m", "--model-path", help="Path to the model on Hugging Face Hub", required=True)
 @option("-g", "--gguf-file", help="Name of the GGUF file", required=True)
 def download_command(model_path: str, gguf_file: str):
     """
     Your one-stop command to download and set up HelpingAI models! 🚀

     Args:
         model_path (str): Where to find your model on HuggingFace Hub 🎯
         gguf_file (str): The GGUF file you want to download 📄

     Example:
         >>> python -m webscout.Extra.autollama download \\
         ...     -m "OEvortex/HelpingAI-Lite-1.5T" \\
         ...     -g "HelpingAI-Lite-1.5T.q4_k_m.gguf"
     """
     show_art()

     model_name = gguf_file.split(".Q4")[0]
     download_log = "downloaded_models.log"
     logging_name = f"{model_path}_{model_name}"

     if not os.path.exists(download_log):
         with open(download_log, 'w') as f:
             pass

     try:
         subprocess.check_output(['pip', 'show', 'huggingface-hub'])
     except subprocess.CalledProcessError:
         logger.info("Installing huggingface-hub...")
         subprocess.check_call(['pip', 'install', '-U', 'huggingface_hub[cli]'])
     else:
         logger.info("huggingface-hub is already installed.")

     if is_model_downloaded(logging_name, download_log):
         logger.info(f"Model {logging_name} has already been downloaded. Skipping download.")
     else:
         logger.info(f"Downloading model {logging_name}...")
         token = os.getenv('HUGGINGFACE_TOKEN', None)
         if not token:
             logger.warning("Warning: HUGGINGFACE_TOKEN environment variable is not set. Using None.")

         filepath = download_model(model_path, gguf_file, token)
         log_downloaded_model(logging_name, download_log)
         logger.info(f"Model {logging_name} downloaded and logged.")

     try:
         subprocess.check_output(['ollama', '--version'])
     except subprocess.CalledProcessError:
         logger.info("Installing Ollama...")
         subprocess.check_call(['curl', '-fsSL', 'https://ollama.com/install.sh', '|', 'sh'])
     else:
         logger.info("Ollama is already installed.")

     if is_ollama_running():
         logger.info("Ollama is already running. Skipping the start.")
     else:
         logger.info("Starting Ollama...")
         subprocess.Popen(['ollama', 'serve'])

         while not is_ollama_running():
             logger.info("Waiting for Ollama to start...")
             time.sleep(1)

         logger.info("Ollama has started.")

     if is_model_created(model_name):
         logger.info(f"Model {model_name} is already created. Skipping creation.")
     else:
         logger.info(f"Creating model {model_name}...")
         with open('Modelfile', 'w') as f:
             f.write(f"FROM ./downloads/{gguf_file}")
         subprocess.check_call(['ollama', 'create', model_name, '-f', 'Modelfile'])
         logger.info(f"Model {model_name} created.")

     logger.success(f"model name is > {model_name}")
     logger.info(f"Use Ollama run {model_name}")

 def main():
     """
     Main function to run the AutoLlama CLI.
     """
     app.run()

 if __name__ == "__main__":
     main()
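Two runtime issues in autollama.py are visible above and are unchanged between 7.0 and 7.2. First, `from datetime import time` binds the `datetime.time` class, which has no `sleep()` method, so the `time.sleep(1)` wait loop raises `AttributeError` whenever Ollama actually has to be started. Second, passing `'|'` as a list element to `subprocess.check_call` does not create a shell pipe; `|` and `sh` are handed to curl as literal arguments. A minimal sketch of how those two steps could be written instead (a hypothetical fix, not part of either release; `is_ollama_running` is the helper defined in the file above):

    import shutil
    import subprocess
    import time  # the stdlib time module, which does provide sleep()

    def ensure_ollama_installed():
        """Run the official install script only if the ollama binary is missing."""
        if shutil.which("ollama") is None:
            # A pipe requires a shell; a '|' element in an argument list
            # is passed to curl literally instead of chaining into sh.
            subprocess.check_call(
                "curl -fsSL https://ollama.com/install.sh | sh", shell=True
            )

    def wait_for_ollama(poll_seconds: float = 1.0) -> None:
        """Poll until the Ollama server process shows up."""
        while not is_ollama_running():
            time.sleep(poll_seconds)

Note also that `gguf_file.split(".Q4")` is case-sensitive: for the lowercase `q4_k_m` filename in the docstring example the split never matches, so `model_name` keeps the entire filename, `.gguf` extension included.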
webscout/Extra/gguf.py CHANGED
@@ -17,14 +17,14 @@ from rich.console import Console
 from rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn, TaskProgressColumn
 from rich.panel import Panel
 from rich.table import Table
-from ..Litlogger import LitLogger, LogFormat, ColorScheme
+from ..Litlogger import Logger, LogFormat
 from ..swiftcli import CLI, option

 # Initialize LitLogger with ocean vibes
-logger = LitLogger(
+logger = Logger(
     name="GGUFConverter",
     format=LogFormat.MODERN_EMOJI,
-    color_scheme=ColorScheme.OCEAN
+
 )

 console = Console()
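Both hunks show the same breaking change: the monolithic `LitLogger` class and its `ColorScheme` argument from 7.0 are gone, replaced by a plain `Logger` exported from the restructured `webscout.Litlogger` package (note the new `core/`, `handlers/`, `styles/`, and `utils/` modules in the file list, and `__init__.py` shrinking by 614 lines). A minimal before/after sketch of the migration, using only names that appear in this diff; `"MyTool"` is a placeholder:

    # webscout 7.0 (API removed in 7.2)
    from webscout.Litlogger import LitLogger, LogFormat, ColorScheme

    logger = LitLogger(
        name="MyTool",
        format=LogFormat.MODERN_EMOJI,
        color_scheme=ColorScheme.OCEAN,  # no longer accepted in 7.2
    )

    # webscout 7.2 (as used by autollama.py and gguf.py above)
    from webscout.Litlogger import Logger, LogFormat

    logger = Logger(
        name="MyTool",
        format=LogFormat.MODERN_EMOJI,
    )

    logger.info("ready")  # info/warning/error/success calls appear unchanged in the diff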