webscout 5.0__tar.gz → 5.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of webscout might be problematic.

Files changed (89)
  1. {webscout-5.0/webscout.egg-info → webscout-5.1}/PKG-INFO +7 -7
  2. {webscout-5.0 → webscout-5.1}/README.md +4 -6
  3. {webscout-5.0 → webscout-5.1}/setup.py +4 -2
  4. webscout-5.1/webscout/Extra/autollama.py +179 -0
  5. {webscout-5.0 → webscout-5.1}/webscout/Provider/Llama3.py +1 -1
  6. {webscout-5.0 → webscout-5.1}/webscout/Provider/OLLAMA.py +11 -10
  7. {webscout-5.0 → webscout-5.1/webscout.egg-info}/PKG-INFO +7 -7
  8. {webscout-5.0 → webscout-5.1}/webscout.egg-info/requires.txt +2 -0
  9. webscout-5.0/webscout/Extra/autollama.py +0 -209
  10. {webscout-5.0 → webscout-5.1}/LICENSE.md +0 -0
  11. {webscout-5.0 → webscout-5.1}/setup.cfg +0 -0
  12. {webscout-5.0 → webscout-5.1}/webscout/AIauto.py +0 -0
  13. {webscout-5.0 → webscout-5.1}/webscout/AIbase.py +0 -0
  14. {webscout-5.0 → webscout-5.1}/webscout/AIutel.py +0 -0
  15. {webscout-5.0 → webscout-5.1}/webscout/Agents/Onlinesearcher.py +0 -0
  16. {webscout-5.0 → webscout-5.1}/webscout/Agents/__init__.py +0 -0
  17. {webscout-5.0 → webscout-5.1}/webscout/Agents/functioncall.py +0 -0
  18. {webscout-5.0 → webscout-5.1}/webscout/Bard.py +0 -0
  19. {webscout-5.0 → webscout-5.1}/webscout/Bing_search.py +0 -0
  20. {webscout-5.0 → webscout-5.1}/webscout/DWEBS.py +0 -0
  21. {webscout-5.0 → webscout-5.1}/webscout/Extra/__init__.py +0 -0
  22. {webscout-5.0 → webscout-5.1}/webscout/Extra/gguf.py +0 -0
  23. {webscout-5.0 → webscout-5.1}/webscout/Extra/weather.py +0 -0
  24. {webscout-5.0 → webscout-5.1}/webscout/Extra/weather_ascii.py +0 -0
  25. {webscout-5.0 → webscout-5.1}/webscout/LLM.py +0 -0
  26. {webscout-5.0 → webscout-5.1}/webscout/Local/__init__.py +0 -0
  27. {webscout-5.0 → webscout-5.1}/webscout/Local/_version.py +0 -0
  28. {webscout-5.0 → webscout-5.1}/webscout/Local/formats.py +0 -0
  29. {webscout-5.0 → webscout-5.1}/webscout/Local/model.py +0 -0
  30. {webscout-5.0 → webscout-5.1}/webscout/Local/rawdog.py +0 -0
  31. {webscout-5.0 → webscout-5.1}/webscout/Local/samplers.py +0 -0
  32. {webscout-5.0 → webscout-5.1}/webscout/Local/thread.py +0 -0
  33. {webscout-5.0 → webscout-5.1}/webscout/Local/utils.py +0 -0
  34. {webscout-5.0 → webscout-5.1}/webscout/Provider/Andi.py +0 -0
  35. {webscout-5.0 → webscout-5.1}/webscout/Provider/BasedGPT.py +0 -0
  36. {webscout-5.0 → webscout-5.1}/webscout/Provider/Berlin4h.py +0 -0
  37. {webscout-5.0 → webscout-5.1}/webscout/Provider/Blackboxai.py +0 -0
  38. {webscout-5.0 → webscout-5.1}/webscout/Provider/Cloudflare.py +0 -0
  39. {webscout-5.0 → webscout-5.1}/webscout/Provider/Cohere.py +0 -0
  40. {webscout-5.0 → webscout-5.1}/webscout/Provider/DARKAI.py +0 -0
  41. {webscout-5.0 → webscout-5.1}/webscout/Provider/Deepinfra.py +0 -0
  42. {webscout-5.0 → webscout-5.1}/webscout/Provider/Deepseek.py +0 -0
  43. {webscout-5.0 → webscout-5.1}/webscout/Provider/DiscordRocks.py +0 -0
  44. {webscout-5.0 → webscout-5.1}/webscout/Provider/Farfalle.py +0 -0
  45. {webscout-5.0 → webscout-5.1}/webscout/Provider/Gemini.py +0 -0
  46. {webscout-5.0 → webscout-5.1}/webscout/Provider/Groq.py +0 -0
  47. {webscout-5.0 → webscout-5.1}/webscout/Provider/Koboldai.py +0 -0
  48. {webscout-5.0 → webscout-5.1}/webscout/Provider/Llama.py +0 -0
  49. {webscout-5.0 → webscout-5.1}/webscout/Provider/Openai.py +0 -0
  50. {webscout-5.0 → webscout-5.1}/webscout/Provider/PI.py +0 -0
  51. {webscout-5.0 → webscout-5.1}/webscout/Provider/Perplexity.py +0 -0
  52. {webscout-5.0 → webscout-5.1}/webscout/Provider/Phind.py +0 -0
  53. {webscout-5.0 → webscout-5.1}/webscout/Provider/PizzaGPT.py +0 -0
  54. {webscout-5.0 → webscout-5.1}/webscout/Provider/Poe.py +0 -0
  55. {webscout-5.0 → webscout-5.1}/webscout/Provider/RUBIKSAI.py +0 -0
  56. {webscout-5.0 → webscout-5.1}/webscout/Provider/Reka.py +0 -0
  57. {webscout-5.0 → webscout-5.1}/webscout/Provider/ThinkAnyAI.py +0 -0
  58. {webscout-5.0 → webscout-5.1}/webscout/Provider/Youchat.py +0 -0
  59. {webscout-5.0 → webscout-5.1}/webscout/Provider/__init__.py +0 -0
  60. {webscout-5.0 → webscout-5.1}/webscout/Provider/ai4chat.py +0 -0
  61. {webscout-5.0 → webscout-5.1}/webscout/Provider/felo_search.py +0 -0
  62. {webscout-5.0 → webscout-5.1}/webscout/Provider/julius.py +0 -0
  63. {webscout-5.0 → webscout-5.1}/webscout/Provider/koala.py +0 -0
  64. {webscout-5.0 → webscout-5.1}/webscout/Provider/liaobots.py +0 -0
  65. {webscout-5.0 → webscout-5.1}/webscout/Provider/meta.py +0 -0
  66. {webscout-5.0 → webscout-5.1}/webscout/Provider/turboseek.py +0 -0
  67. {webscout-5.0 → webscout-5.1}/webscout/Provider/xdash.py +0 -0
  68. {webscout-5.0 → webscout-5.1}/webscout/Provider/yep.py +0 -0
  69. {webscout-5.0 → webscout-5.1}/webscout/YTdownloader.py +0 -0
  70. {webscout-5.0 → webscout-5.1}/webscout/__init__.py +0 -0
  71. {webscout-5.0 → webscout-5.1}/webscout/__main__.py +0 -0
  72. {webscout-5.0 → webscout-5.1}/webscout/async_providers.py +0 -0
  73. {webscout-5.0 → webscout-5.1}/webscout/cli.py +0 -0
  74. {webscout-5.0 → webscout-5.1}/webscout/exceptions.py +0 -0
  75. {webscout-5.0 → webscout-5.1}/webscout/g4f.py +0 -0
  76. {webscout-5.0 → webscout-5.1}/webscout/models.py +0 -0
  77. {webscout-5.0 → webscout-5.1}/webscout/tempid.py +0 -0
  78. {webscout-5.0 → webscout-5.1}/webscout/transcriber.py +0 -0
  79. {webscout-5.0 → webscout-5.1}/webscout/utils.py +0 -0
  80. {webscout-5.0 → webscout-5.1}/webscout/version.py +0 -0
  81. {webscout-5.0 → webscout-5.1}/webscout/voice.py +0 -0
  82. {webscout-5.0 → webscout-5.1}/webscout/webai.py +0 -0
  83. {webscout-5.0 → webscout-5.1}/webscout/webscout_search.py +0 -0
  84. {webscout-5.0 → webscout-5.1}/webscout/webscout_search_async.py +0 -0
  85. {webscout-5.0 → webscout-5.1}/webscout/websx_search.py +0 -0
  86. {webscout-5.0 → webscout-5.1}/webscout.egg-info/SOURCES.txt +0 -0
  87. {webscout-5.0 → webscout-5.1}/webscout.egg-info/dependency_links.txt +0 -0
  88. {webscout-5.0 → webscout-5.1}/webscout.egg-info/entry_points.txt +0 -0
  89. {webscout-5.0 → webscout-5.1}/webscout.egg-info/top_level.txt +0 -0
{webscout-5.0/webscout.egg-info → webscout-5.1}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: webscout
- Version: 5.0
+ Version: 5.1
  Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
  Author: OEvortex
  Author-email: helpingai5@gmail.com
@@ -66,6 +66,8 @@ Requires-Dist: requests_html
  Requires-Dist: bson
  Requires-Dist: cloudscraper
  Requires-Dist: emoji
+ Requires-Dist: colorlog
+ Requires-Dist: openai
  Provides-Extra: dev
  Requires-Dist: ruff>=0.1.6; extra == "dev"
  Requires-Dist: pytest>=7.4.2; extra == "dev"
@@ -1645,15 +1647,13 @@ gguf.convert(

  Webscout's `autollama` utility download model from huggingface and then automatically makes it ollama ready

- **Example:**
-
  ```python
  from webscout import autollama

- autollama(
-     model_path="OEvortex/HelpingAI-Lite-1.5T", # Hugging Face model ID
-     gguf_file="HelpingAI-Lite-1.5T.q4_k_m.gguf" # GGUF file ID
- )
+ model_path = "Vortex4ai/Jarvis-0.5B"
+ gguf_file = "test2-q4_k_m.gguf"
+
+ autollama.main(model_path, gguf_file)
  ```

  **Command Line Usage:**
{webscout-5.0 → webscout-5.1}/README.md

@@ -1568,15 +1568,13 @@ gguf.convert(

  Webscout's `autollama` utility download model from huggingface and then automatically makes it ollama ready

- **Example:**
-
  ```python
  from webscout import autollama

- autollama(
-     model_path="OEvortex/HelpingAI-Lite-1.5T", # Hugging Face model ID
-     gguf_file="HelpingAI-Lite-1.5T.q4_k_m.gguf" # GGUF file ID
- )
+ model_path = "Vortex4ai/Jarvis-0.5B"
+ gguf_file = "test2-q4_k_m.gguf"
+
+ autollama.main(model_path, gguf_file)
  ```

  **Command Line Usage:**
{webscout-5.0 → webscout-5.1}/setup.py

@@ -5,7 +5,7 @@ with open("README.md", encoding="utf-8") as f:

  setup(
      name="webscout",
-     version="5.0",
+     version="5.1",
      description="Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more",
      long_description=README,
      long_description_content_type="text/markdown",
@@ -69,7 +69,9 @@ setup(
          "requests_html",
          "bson",
          "cloudscraper",
-         "emoji"
+         "emoji",
+         "colorlog",
+         "openai",
      ],
      entry_points={
          "console_scripts": [
webscout-5.1/webscout/Extra/autollama.py

@@ -0,0 +1,179 @@
+ import warnings
+ from datetime import time
+ import os
+ import sys
+ import subprocess
+ import logging
+ import psutil
+ from huggingface_hub import hf_hub_url, cached_download
+ import colorlog
+ import ollama # Import ollama for interactive chat
+ import argparse # Import argparse for command-line arguments
+
+ # Suppress specific warnings
+ warnings.filterwarnings(
+     "ignore", category=FutureWarning, module="huggingface_hub.file_download"
+ )
+
+ # Configure logging with colors
+ handler = colorlog.StreamHandler()
+ handler.setFormatter(
+     colorlog.ColoredFormatter(
+         "%(log_color)s%(asctime)s - %(levelname)s - %(message)s",
+         datefmt="%Y-%m-%d %H:%M:%S",
+         log_colors={
+             "DEBUG": "cyan",
+             "INFO": "green",
+             "WARNING": "yellow",
+             "ERROR": "red",
+             "CRITICAL": "red,bg_white",
+         },
+     )
+ )
+
+ logger = colorlog.getLogger(__name__)
+ if not logger.hasHandlers():
+     logger.addHandler(handler)
+     logger.setLevel(logging.INFO)
+
+ # Redirect warnings to the logger but avoid duplication
+ logging.captureWarnings(True)
+ py_warnings_logger = logging.getLogger("py.warnings")
+ if not py_warnings_logger.hasHandlers():
+     py_warnings_logger.addHandler(handler)
+
+
+ def show_art():
+     logger.info("Made with love in India")
+
+
+ def usage():
+     logger.info("Usage: python script.py -m <model_path> -g <gguf_file>")
+     logger.info("Options:")
+     logger.info(" -m <model_path> Set the path to the model")
+     logger.info(" -g <gguf_file> Set the GGUF file name")
+     logger.info(" -h Display this help and exit")
+
+
+ def is_model_downloaded(logging_name, download_log):
+     if not os.path.exists(download_log):
+         return False
+     with open(download_log, "r") as f:
+         for line in f:
+             if line.strip() == logging_name:
+                 return True
+     return False
+
+
+ def log_downloaded_model(logging_name, download_log):
+     with open(download_log, "a") as f:
+         f.write(logging_name + "\n")
+
+
+ def is_model_created(model_name):
+     result = subprocess.run(["ollama", "list"], stdout=subprocess.PIPE)
+     return model_name in result.stdout.decode("utf-8")
+
+
+ def download_model(repo_id, filename, token, cache_dir="downloads"):
+     url = hf_hub_url(repo_id, filename)
+     filepath = cached_download(
+         url, cache_dir=cache_dir, force_filename=filename, use_auth_token=token
+     )
+     return filepath
+
+
+ def is_ollama_running():
+     for proc in psutil.process_iter(["name"]):
+         if proc.info["name"] in ["ollama", "ollama.exe"]:
+             return True
+     return False
+
+
+ def main(model_path=None, gguf_file=None): # Modified to handle both CLI and non-CLI
+     show_art()
+
+     # Parse command-line arguments if provided
+     parser = argparse.ArgumentParser(description="Download and create an Ollama model")
+     parser.add_argument("-m", "--model_path", help="Path to the model on Hugging Face Hub")
+     parser.add_argument("-g", "--gguf_file", help="Name of the GGUF file")
+     args = parser.parse_args()
+
+     # Use arguments from command line or function parameters
+     model_path = args.model_path if args.model_path else model_path
+     gguf_file = args.gguf_file if args.gguf_file else gguf_file
+
+     if not model_path or not gguf_file:
+         logger.error("Error: model_path and gguf_file are required.")
+         usage()
+         sys.exit(2)
+
+     model_name = gguf_file.split(".Q4")[0]
+     download_log = "downloaded_models.log"
+     logging_name = f"{model_path}_{model_name}"
+
+     # Ensure the log file exists
+     if not os.path.exists(download_log):
+         with open(download_log, 'w') as f:
+             pass
+
+     # Check if huggingface-hub is installed, and install it if not
+     try:
+         subprocess.check_output(['pip', 'show', 'huggingface-hub'])
+     except subprocess.CalledProcessError:
+         logger.info("Installing huggingface-hub...")
+         subprocess.check_call(['pip', 'install', '-U', 'huggingface_hub[cli]'])
+     else:
+         logger.info("huggingface-hub is already installed.")
+
+     # Check if the model has already been downloaded
+     if is_model_downloaded(logging_name, download_log):
+         logger.info(f"Model {logging_name} has already been downloaded. Skipping download.")
+     else:
+         logger.info(f"Downloading model {logging_name}...")
+         token = os.getenv('HUGGINGFACE_TOKEN', None)
+         if not token:
+             logger.warning("Warning: HUGGINGFACE_TOKEN environment variable is not set. Using None.")
+             token = None
+
+         filepath = download_model(model_path, gguf_file, token)
+         log_downloaded_model(logging_name, download_log)
+         logger.info(f"Model {logging_name} downloaded and logged.")
+
+     # Check if Ollama is installed, and install it if not
+     try:
+         subprocess.check_output(['ollama', '--version'])
+     except subprocess.CalledProcessError:
+         logger.info("Installing Ollama...")
+         subprocess.check_call(['curl', '-fsSL', 'https://ollama.com/install.sh', '|', 'sh'])
+     else:
+         logger.info("Ollama is already installed.")
+
+     # Check if Ollama is already running
+     if is_ollama_running():
+         logger.info("Ollama is already running. Skipping the start.")
+     else:
+         logger.info("Starting Ollama...")
+         subprocess.Popen(['ollama', 'serve'])
+
+         while not is_ollama_running():
+             logger.info("Waiting for Ollama to start...")
+             time.sleep(1)
+
+         logger.info("Ollama has started.")
+
+     # Check if the model has already been created
+     if is_model_created(model_name):
+         logger.info(f"Model {model_name} is already created. Skipping creation.")
+     else:
+         logger.info(f"Creating model {model_name}...")
+         with open('Modelfile', 'w') as f:
+             f.write(f"FROM ./downloads/{gguf_file}")
+         subprocess.check_call(['ollama', 'create', model_name, '-f', 'Modelfile'])
+         logger.info(f"Model {model_name} created.")
+
+     logger.info(f"model name is > {model_name}")
+     logger.info(f"Use Ollama run {model_name}")
+
+ if __name__ == "__main__":
+     main()
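
For orientation, the replacement module is driven either from the command line or from Python. Below is a minimal usage sketch that mirrors the updated README example quoted in the PKG-INFO/README hunks above; the repo ID and GGUF file name are the placeholder values from that example, not recommendations:

```python
# Programmatic use of the new autollama module, as the 5.1 README shows it.
from webscout import autollama

# Placeholder values taken from the README diff; substitute your own
# Hugging Face repo ID and GGUF file name.
model_path = "Vortex4ai/Jarvis-0.5B"
gguf_file = "test2-q4_k_m.gguf"

# main() downloads the GGUF file, ensures huggingface-hub and Ollama are
# installed, starts `ollama serve` if needed, writes a Modelfile, and
# registers the model with `ollama create`.
autollama.main(model_path, gguf_file)
```

Note that `main()` always calls `parser.parse_args()`, so any `-m`/`-g` flags present on the interpreter's own command line take precedence over the arguments passed in from Python.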
{webscout-5.0 → webscout-5.1}/webscout/Provider/Llama3.py

@@ -21,7 +21,7 @@ class LLAMA3(Provider):
          history_offset: int = 10250,
          act: str = None,
          model: str = "llama3-8b",
-         system: str = "Answer as concisely as possible.",
+         system: str = "GPT syle",
      ):
          """Instantiates Snova

{webscout-5.0 → webscout-5.1}/webscout/Provider/OLLAMA.py

@@ -36,11 +36,12 @@ class OLLAMA(Provider):
          proxies: dict = {},
          history_offset: int = 10250,
          act: str = None,
+         system_prompt: str = "You are a helpful and friendly AI assistant.",
      ):
          """Instantiates Ollama

          Args:
-             model (str, optional): Model name. Defaults to 'llama2'.
+             model (str, optional): Model name. Defaults to 'qwen2:0.5b'.
              is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
              max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
              timeout (int, optional): Http request timeout. Defaults to 30.
@@ -50,12 +51,14 @@ class OLLAMA(Provider):
              proxies (dict, optional): Http request proxies. Defaults to {}.
              history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
              act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+             system_prompt (str, optional): System prompt for Ollama. Defaults to "You are a helpful and friendly AI assistant.".
          """
          self.model = model
          self.is_conversation = is_conversation
          self.max_tokens_to_sample = max_tokens
          self.timeout = timeout
          self.last_response = {}
+         self.system_prompt = system_prompt

          self.__available_optimizers = (
              method
@@ -110,21 +113,19 @@ class OLLAMA(Provider):
          )

          def for_stream():
+             # Correctly call ollama.chat with stream=True
              stream = ollama.chat(model=self.model, messages=[
+                 {'role': 'system', 'content': self.system_prompt},
                  {'role': 'user', 'content': conversation_prompt}
              ], stream=True)

-             message_load = ""
+             # Yield each chunk directly
              for chunk in stream:
-                 message_load += chunk['message']['content']
-                 yield chunk['message']['content'] if raw else dict(text=message_load)
-             self.last_response.update(dict(text=message_load))
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
+                 yield chunk['message']['content'] if raw else dict(text=chunk['message']['content'])

          def for_non_stream():
              response = ollama.chat(model=self.model, messages=[
+                 {'role': 'system', 'content': self.system_prompt}, # Add system message
                  {'role': 'user', 'content': conversation_prompt}
              ])
              self.last_response.update(dict(text=response['message']['content']))
@@ -183,6 +184,6 @@ class OLLAMA(Provider):
          return response["text"]
  if __name__ == "__main__":
      ollama_provider = OLLAMA(model="qwen:0.5b")
-     response = ollama_provider.chat("hi")
+     response = ollama_provider.chat("hi", stream=True)
      for r in response:
-         print(r, end="", flush=True)
+         print(r, end="", flush=True)
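
For context, a short sketch of how the reworked provider is used after this change. The import path is an assumption based on the file layout in the list above; the model name is the one used in the module's own `__main__` block, and the remaining constructor arguments keep their defaults:

```python
# Assumed import path, following webscout/Provider/OLLAMA.py in the file list.
from webscout.Provider.OLLAMA import OLLAMA

# system_prompt is the constructor argument added in 5.1; per the diff its
# default is "You are a helpful and friendly AI assistant.".
ai = OLLAMA(model="qwen:0.5b", system_prompt="You are a concise assistant.")

# With the 5.1 change, streaming yields each chunk's text individually rather
# than the accumulated message, so the caller prints (or assembles) the chunks.
for chunk in ai.chat("hi", stream=True):
    print(chunk, end="", flush=True)
```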
{webscout-5.0 → webscout-5.1/webscout.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: webscout
- Version: 5.0
+ Version: 5.1
  Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
  Author: OEvortex
  Author-email: helpingai5@gmail.com
@@ -66,6 +66,8 @@ Requires-Dist: requests_html
  Requires-Dist: bson
  Requires-Dist: cloudscraper
  Requires-Dist: emoji
+ Requires-Dist: colorlog
+ Requires-Dist: openai
  Provides-Extra: dev
  Requires-Dist: ruff>=0.1.6; extra == "dev"
  Requires-Dist: pytest>=7.4.2; extra == "dev"
@@ -1645,15 +1647,13 @@ gguf.convert(

  Webscout's `autollama` utility download model from huggingface and then automatically makes it ollama ready

- **Example:**
-
  ```python
  from webscout import autollama

- autollama(
-     model_path="OEvortex/HelpingAI-Lite-1.5T", # Hugging Face model ID
-     gguf_file="HelpingAI-Lite-1.5T.q4_k_m.gguf" # GGUF file ID
- )
+ model_path = "Vortex4ai/Jarvis-0.5B"
+ gguf_file = "test2-q4_k_m.gguf"
+
+ autollama.main(model_path, gguf_file)
  ```

  **Command Line Usage:**
{webscout-5.0 → webscout-5.1}/webscout.egg-info/requires.txt

@@ -40,6 +40,8 @@ requests_html
  bson
  cloudscraper
  emoji
+ colorlog
+ openai

  [dev]
  ruff>=0.1.6
webscout-5.0/webscout/Extra/autollama.py

@@ -1,209 +0,0 @@
- import subprocess
- import argparse
- import os
- from rich.console import Console
- from rich.panel import Panel
- from rich.progress import track
- from yaspin import yaspin
- from pyfiglet import figlet_format
- import time
-
- console = Console()
-
- def autollama(model_path, gguf_file):
-     """Manages models with Ollama using the autollama.sh script.
-
-     Args:
-         model_path (str): The path to the Hugging Face model.
-         gguf_file (str): The name of the GGUF file.
-     """
-     console.print(f"[bold green]{figlet_format('Autollama')}[/]\n", justify="center")
-
-     # Check if autollama.sh exists in the current working directory
-     script_path = os.path.join(os.getcwd(), "autollama.sh")
-     if not os.path.exists(script_path):
-         # Create autollama.sh with the content provided
-         with open(script_path, "w") as f:
-             f.write("""
- function show_art() {
- cat << "EOF"
- Made with love in India
- EOF
- }
-
- show_art
-
- # Initialize default values
- MODEL_PATH=""
- GGUF_FILE=""
-
- # Display help/usage information
- usage() {
- echo "Usage: $0 -m <model_path> -g <gguf_file>"
- echo
- echo "Options:"
- echo " -m <model_path> Set the path to the model"
- echo " -g <gguf_file> Set the GGUF file name"
- echo " -h Display this help and exit"
- echo
- }
-
- # Parse command-line options
- while getopts ":m:g:h" opt; do
- case ${opt} in
- m )
- MODEL_PATH=$OPTARG
- ;;
- g )
- GGUF_FILE=$OPTARG
- ;;
- h )
- usage
- exit 0
- ;;
- \? )
- echo "Invalid Option: -$OPTARG" 1>&2
- usage
- exit 1
- ;;
- : )
- echo "Invalid Option: -$OPTARG requires an argument" 1>&2
- usage
- exit 1
- ;;
- esac
- done
-
- # Check required parameters
- if [ -z "$MODEL_PATH" ] || [ -z "$GGUF_FILE" ]; then
- echo "Error: -m (model_path) and -g (gguf_file) are required."
- usage
- exit 1
- fi
-
- # Derive MODEL_NAME
- MODEL_NAME=$(echo $GGUF_FILE | sed 's/\(.*\)\.Q4.*/\\1/')
-
- # Log file where downloaded models are recorded
- DOWNLOAD_LOG="downloaded_models.log"
-
- # Composite logging name
- LOGGING_NAME="${MODEL_PATH}_${MODEL_NAME}"
-
- # Check if the model has been downloaded
- function is_model_downloaded {
- grep -qxF "$LOGGING_NAME" "$DOWNLOAD_LOG" && return 0 || return 1
- }
-
- # Log the downloaded model
- function log_downloaded_model {
- echo "$LOGGING_NAME" >> "$DOWNLOAD_LOG"
- }
-
- # Function to check if the model has already been created
- function is_model_created {
- # 'ollama list' lists all models
- ollama list | grep -q "$MODEL_NAME" && return 0 || return 1
- }
-
- # Check if huggingface-hub is installed, and install it if not
- if ! pip show huggingface-hub > /dev/null; then
- echo "Installing huggingface-hub..."
- pip install -U "huggingface_hub[cli]"
- else
- echo "huggingface-hub is already installed."
- fi
-
- # Check if the model has already been downloaded
- if is_model_downloaded; then
- echo "Model $LOGGING_NAME has already been downloaded. Skipping download."
- else
- echo "Downloading model $LOGGING_NAME..."
- # Download the model
- huggingface-cli download $MODEL_PATH $GGUF_FILE --local-dir downloads --local-dir-use-symlinks False
-
- # Log the downloaded model
- log_downloaded_model
- echo "Model $LOGGING_NAME downloaded and logged."
- fi
-
- # Check if Ollama is installed, and install it if not
- if ! command -v ollama &> /dev/null; then
- echo "Installing Ollama..."
- curl -fsSL https://ollama.com/install.sh | sh
- else
- echo "Ollama is already installed."
- fi
-
- # Check if Ollama is already running
- if pgrep -f 'ollama serve' > /dev/null; then
- echo "Ollama is already running. Skipping the start."
- else
- echo "Starting Ollama..."
- # Start Ollama in the background
- ollama serve &
-
- # Wait for Ollama to start
- while true; do
- if pgrep -f 'ollama serve' > /dev/null; then
- echo "Ollama has started."
- sleep 60
- break
- else
- echo "Waiting for Ollama to start..."
- sleep 1 # Wait for 1 second before checking again
- fi
- done
- fi
-
- # Check if the model has already been created
- if is_model_created; then
- echo "Model $MODEL_NAME is already created. Skipping creation."
- else
- echo "Creating model $MODEL_NAME..."
- # Create the model in Ollama
- # Prepare Modelfile with the downloaded path
- echo "FROM ./downloads/$GGUF_FILE" > Modelfile
- ollama create $MODEL_NAME -f Modelfile
- echo "Model $MODEL_NAME created."
- fi
-
-
- echo "model name is > $MODEL_NAME"
- echo "Use Ollama run $MODEL_NAME"
- """)
-         # Make autollama.sh executable (using chmod)
-         os.chmod(script_path, 0o755)
-
-     # Initialize command list
-     command = ["bash", script_path, "-m", model_path, "-g", gguf_file]
-
-     # Execute the command
-     process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
-
-     for line in iter(process.stdout.readline, ''):
-         console.print(Panel(line.strip(), title="Autollama Output", expand=False))
-
-     for line in iter(process.stderr.readline, ''):
-         console.print(Panel(line.strip(), title="Autollama Errors (if any)", expand=False))
-
-     process.wait()
-     console.print("[green]Model is ready![/]")
-
- def main():
-     parser = argparse.ArgumentParser(description='Automatically create and run an Ollama model in Ollama')
-     parser.add_argument('-m', '--model_path', required=True, help='Set the huggingface model id to the Hugging Face model')
-     parser.add_argument('-g', '--gguf_file', required=True, help='Set the GGUF file name')
-     args = parser.parse_args()
-
-     try:
-         with yaspin(text="Processing...") as spinner:
-             autollama(args.model_path, args.gguf_file)
-             spinner.ok("Done!")
-     except Exception as e:
-         console.print(f"[red]Error: {e}[/]")
-         exit(1)
-
- if __name__ == "__main__":
-     main()
-