webscout 4.9.tar.gz → 5.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of webscout might be problematic.

Files changed (90)
  1. {webscout-4.9/webscout.egg-info → webscout-5.1}/PKG-INFO +7 -7
  2. {webscout-4.9 → webscout-5.1}/README.md +4 -6
  3. {webscout-4.9 → webscout-5.1}/setup.py +4 -2
  4. webscout-5.1/webscout/Agents/functioncall.py +142 -0
  5. webscout-5.1/webscout/Extra/autollama.py +179 -0
  6. {webscout-4.9 → webscout-5.1}/webscout/Provider/DiscordRocks.py +2 -1
  7. {webscout-4.9 → webscout-5.1}/webscout/Provider/Llama3.py +1 -1
  8. {webscout-4.9 → webscout-5.1}/webscout/Provider/OLLAMA.py +11 -10
  9. {webscout-4.9 → webscout-5.1/webscout.egg-info}/PKG-INFO +7 -7
  10. {webscout-4.9 → webscout-5.1}/webscout.egg-info/requires.txt +2 -0
  11. webscout-4.9/webscout/Agents/functioncall.py +0 -186
  12. webscout-4.9/webscout/Extra/autollama.py +0 -209
  13. {webscout-4.9 → webscout-5.1}/LICENSE.md +0 -0
  14. {webscout-4.9 → webscout-5.1}/setup.cfg +0 -0
  15. {webscout-4.9 → webscout-5.1}/webscout/AIauto.py +0 -0
  16. {webscout-4.9 → webscout-5.1}/webscout/AIbase.py +0 -0
  17. {webscout-4.9 → webscout-5.1}/webscout/AIutel.py +0 -0
  18. {webscout-4.9 → webscout-5.1}/webscout/Agents/Onlinesearcher.py +0 -0
  19. {webscout-4.9 → webscout-5.1}/webscout/Agents/__init__.py +0 -0
  20. {webscout-4.9 → webscout-5.1}/webscout/Bard.py +0 -0
  21. {webscout-4.9 → webscout-5.1}/webscout/Bing_search.py +0 -0
  22. {webscout-4.9 → webscout-5.1}/webscout/DWEBS.py +0 -0
  23. {webscout-4.9 → webscout-5.1}/webscout/Extra/__init__.py +0 -0
  24. {webscout-4.9 → webscout-5.1}/webscout/Extra/gguf.py +0 -0
  25. {webscout-4.9 → webscout-5.1}/webscout/Extra/weather.py +0 -0
  26. {webscout-4.9 → webscout-5.1}/webscout/Extra/weather_ascii.py +0 -0
  27. {webscout-4.9 → webscout-5.1}/webscout/LLM.py +0 -0
  28. {webscout-4.9 → webscout-5.1}/webscout/Local/__init__.py +0 -0
  29. {webscout-4.9 → webscout-5.1}/webscout/Local/_version.py +0 -0
  30. {webscout-4.9 → webscout-5.1}/webscout/Local/formats.py +0 -0
  31. {webscout-4.9 → webscout-5.1}/webscout/Local/model.py +0 -0
  32. {webscout-4.9 → webscout-5.1}/webscout/Local/rawdog.py +0 -0
  33. {webscout-4.9 → webscout-5.1}/webscout/Local/samplers.py +0 -0
  34. {webscout-4.9 → webscout-5.1}/webscout/Local/thread.py +0 -0
  35. {webscout-4.9 → webscout-5.1}/webscout/Local/utils.py +0 -0
  36. {webscout-4.9 → webscout-5.1}/webscout/Provider/Andi.py +0 -0
  37. {webscout-4.9 → webscout-5.1}/webscout/Provider/BasedGPT.py +0 -0
  38. {webscout-4.9 → webscout-5.1}/webscout/Provider/Berlin4h.py +0 -0
  39. {webscout-4.9 → webscout-5.1}/webscout/Provider/Blackboxai.py +0 -0
  40. {webscout-4.9 → webscout-5.1}/webscout/Provider/Cloudflare.py +0 -0
  41. {webscout-4.9 → webscout-5.1}/webscout/Provider/Cohere.py +0 -0
  42. {webscout-4.9 → webscout-5.1}/webscout/Provider/DARKAI.py +0 -0
  43. {webscout-4.9 → webscout-5.1}/webscout/Provider/Deepinfra.py +0 -0
  44. {webscout-4.9 → webscout-5.1}/webscout/Provider/Deepseek.py +0 -0
  45. {webscout-4.9 → webscout-5.1}/webscout/Provider/Farfalle.py +0 -0
  46. {webscout-4.9 → webscout-5.1}/webscout/Provider/Gemini.py +0 -0
  47. {webscout-4.9 → webscout-5.1}/webscout/Provider/Groq.py +0 -0
  48. {webscout-4.9 → webscout-5.1}/webscout/Provider/Koboldai.py +0 -0
  49. {webscout-4.9 → webscout-5.1}/webscout/Provider/Llama.py +0 -0
  50. {webscout-4.9 → webscout-5.1}/webscout/Provider/Openai.py +0 -0
  51. {webscout-4.9 → webscout-5.1}/webscout/Provider/PI.py +0 -0
  52. {webscout-4.9 → webscout-5.1}/webscout/Provider/Perplexity.py +0 -0
  53. {webscout-4.9 → webscout-5.1}/webscout/Provider/Phind.py +0 -0
  54. {webscout-4.9 → webscout-5.1}/webscout/Provider/PizzaGPT.py +0 -0
  55. {webscout-4.9 → webscout-5.1}/webscout/Provider/Poe.py +0 -0
  56. {webscout-4.9 → webscout-5.1}/webscout/Provider/RUBIKSAI.py +0 -0
  57. {webscout-4.9 → webscout-5.1}/webscout/Provider/Reka.py +0 -0
  58. {webscout-4.9 → webscout-5.1}/webscout/Provider/ThinkAnyAI.py +0 -0
  59. {webscout-4.9 → webscout-5.1}/webscout/Provider/Youchat.py +0 -0
  60. {webscout-4.9 → webscout-5.1}/webscout/Provider/__init__.py +0 -0
  61. {webscout-4.9 → webscout-5.1}/webscout/Provider/ai4chat.py +0 -0
  62. {webscout-4.9 → webscout-5.1}/webscout/Provider/felo_search.py +0 -0
  63. {webscout-4.9 → webscout-5.1}/webscout/Provider/julius.py +0 -0
  64. {webscout-4.9 → webscout-5.1}/webscout/Provider/koala.py +0 -0
  65. {webscout-4.9 → webscout-5.1}/webscout/Provider/liaobots.py +0 -0
  66. {webscout-4.9 → webscout-5.1}/webscout/Provider/meta.py +0 -0
  67. {webscout-4.9 → webscout-5.1}/webscout/Provider/turboseek.py +0 -0
  68. {webscout-4.9 → webscout-5.1}/webscout/Provider/xdash.py +0 -0
  69. {webscout-4.9 → webscout-5.1}/webscout/Provider/yep.py +0 -0
  70. {webscout-4.9 → webscout-5.1}/webscout/YTdownloader.py +0 -0
  71. {webscout-4.9 → webscout-5.1}/webscout/__init__.py +0 -0
  72. {webscout-4.9 → webscout-5.1}/webscout/__main__.py +0 -0
  73. {webscout-4.9 → webscout-5.1}/webscout/async_providers.py +0 -0
  74. {webscout-4.9 → webscout-5.1}/webscout/cli.py +0 -0
  75. {webscout-4.9 → webscout-5.1}/webscout/exceptions.py +0 -0
  76. {webscout-4.9 → webscout-5.1}/webscout/g4f.py +0 -0
  77. {webscout-4.9 → webscout-5.1}/webscout/models.py +0 -0
  78. {webscout-4.9 → webscout-5.1}/webscout/tempid.py +0 -0
  79. {webscout-4.9 → webscout-5.1}/webscout/transcriber.py +0 -0
  80. {webscout-4.9 → webscout-5.1}/webscout/utils.py +0 -0
  81. {webscout-4.9 → webscout-5.1}/webscout/version.py +0 -0
  82. {webscout-4.9 → webscout-5.1}/webscout/voice.py +0 -0
  83. {webscout-4.9 → webscout-5.1}/webscout/webai.py +0 -0
  84. {webscout-4.9 → webscout-5.1}/webscout/webscout_search.py +0 -0
  85. {webscout-4.9 → webscout-5.1}/webscout/webscout_search_async.py +0 -0
  86. {webscout-4.9 → webscout-5.1}/webscout/websx_search.py +0 -0
  87. {webscout-4.9 → webscout-5.1}/webscout.egg-info/SOURCES.txt +0 -0
  88. {webscout-4.9 → webscout-5.1}/webscout.egg-info/dependency_links.txt +0 -0
  89. {webscout-4.9 → webscout-5.1}/webscout.egg-info/entry_points.txt +0 -0
  90. {webscout-4.9 → webscout-5.1}/webscout.egg-info/top_level.txt +0 -0

{webscout-4.9/webscout.egg-info → webscout-5.1}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 4.9
+Version: 5.1
 Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
 Author: OEvortex
 Author-email: helpingai5@gmail.com
@@ -66,6 +66,8 @@ Requires-Dist: requests_html
 Requires-Dist: bson
 Requires-Dist: cloudscraper
 Requires-Dist: emoji
+Requires-Dist: colorlog
+Requires-Dist: openai
 Provides-Extra: dev
 Requires-Dist: ruff>=0.1.6; extra == "dev"
 Requires-Dist: pytest>=7.4.2; extra == "dev"
@@ -1645,15 +1647,13 @@ gguf.convert(
 
 Webscout's `autollama` utility download model from huggingface and then automatically makes it ollama ready
 
-**Example:**
-
 ```python
 from webscout import autollama
 
-autollama(
-    model_path="OEvortex/HelpingAI-Lite-1.5T", # Hugging Face model ID
-    gguf_file="HelpingAI-Lite-1.5T.q4_k_m.gguf" # GGUF file ID
-)
+model_path = "Vortex4ai/Jarvis-0.5B"
+gguf_file = "test2-q4_k_m.gguf"
+
+autollama.main(model_path, gguf_file)
 ```
 
 **Command Line Usage:**

{webscout-4.9 → webscout-5.1}/README.md

@@ -1568,15 +1568,13 @@ gguf.convert(
 
 Webscout's `autollama` utility download model from huggingface and then automatically makes it ollama ready
 
-**Example:**
-
 ```python
 from webscout import autollama
 
-autollama(
-    model_path="OEvortex/HelpingAI-Lite-1.5T", # Hugging Face model ID
-    gguf_file="HelpingAI-Lite-1.5T.q4_k_m.gguf" # GGUF file ID
-)
+model_path = "Vortex4ai/Jarvis-0.5B"
+gguf_file = "test2-q4_k_m.gguf"
+
+autollama.main(model_path, gguf_file)
 ```
 
 **Command Line Usage:**
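
Note: 5.1 changes the documented `autollama` entry point: the utility is no longer called directly as `autollama(...)` but through `autollama.main(model_path, gguf_file)`. A minimal sketch of the new calling convention, mirroring the README diff above (the repo ID and GGUF filename below are hypothetical placeholders; note also that `main()` unconditionally runs `argparse.parse_args()`, so calling it from a script that defines its own command-line arguments may conflict):

```python
# Sketch of the 5.1 calling convention shown in the README diff above.
from webscout import autollama

model_path = "some-user/some-model"   # Hugging Face repo ID (placeholder)
gguf_file = "some-model.q4_k_m.gguf"  # GGUF file name (placeholder)

autollama.main(model_path, gguf_file)
```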

{webscout-4.9 → webscout-5.1}/setup.py

@@ -5,7 +5,7 @@ with open("README.md", encoding="utf-8") as f:
 
 setup(
     name="webscout",
-    version="4.9",
+    version="5.1",
     description="Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more",
    long_description=README,
     long_description_content_type="text/markdown",
@@ -69,7 +69,9 @@ setup(
         "requests_html",
         "bson",
         "cloudscraper",
-        "emoji"
+        "emoji",
+        "colorlog",
+        "openai",
     ],
     entry_points={
         "console_scripts": [

webscout-5.1/webscout/Agents/functioncall.py (new file)

@@ -0,0 +1,142 @@
+import json
+import logging
+from webscout import LLAMA3, WEBS
+
+class FunctionCallingAgent:
+    def __init__(self, model: str = "llama3-8b",
+                 system_prompt: str = 'You are a helpful assistant that will always answer what the user wants',
+                 tools: list = None):
+        self.LLAMA3 = LLAMA3(model=model, system=system_prompt, timeout=300)
+        self.tools = tools if tools is not None else []
+        self.webs = WEBS()
+
+    def function_call_handler(self, message_text: str) -> dict:
+        system_message = self._generate_system_message(message_text)
+        response = self.LLAMA3.chat(system_message)
+        # logging.info(f"Raw response: {response}")
+        return self._parse_function_call(response)
+
+    def _generate_system_message(self, user_message: str) -> str:
+        tools_description = '\n'.join([f"- {tool['function']['name']}: {tool['function'].get('description', '')}" for tool in self.tools])
+        return (
+            "You are an AI assistant capable of understanding user requests and using tools to fulfill them. "
+            "Always respond using the JSON format specified below, even if you're not sure about the answer. "
+            f"Available tools:\n{tools_description}\n\n"
+            "Instructions:\n"
+            "1. Analyze the user's request.\n"
+            "2. Choose the most appropriate tool based on the request.\n"
+            "3. Respond ONLY with a JSON object in this exact format:\n"
+            "{\n"
+            '  "tool_name": "name_of_the_tool",\n'
+            '  "tool_input": {\n'
+            '    "param1": "value1",\n'
+            '    "param2": "value2"\n'
+            "  }\n"
+            "}\n\n"
+            "If you can't determine a suitable tool, use the 'general_ai' tool with the user's message as the 'question' parameter.\n\n"
+            f"User request: {user_message}\n\n"
+            "Your response (in JSON format):"
+        )
+
+    def _parse_function_call(self, response: str) -> dict:
+        try:
+            # Find the JSON-like part of the response
+            start_idx = response.find("{")
+            end_idx = response.rfind("}") + 1
+
+            if start_idx == -1 or end_idx == -1:
+                raise ValueError("No valid JSON structure found in the response.")
+
+            response_json_str = response[start_idx:end_idx]
+
+            # Attempt to load the JSON string
+            return json.loads(response_json_str)
+
+        except (ValueError, json.JSONDecodeError) as e:
+            logging.error(f"Error parsing function call: {e}")
+            return {"error": str(e)}
+
+    def execute_function(self, function_call_data: dict) -> str:
+        function_name = function_call_data.get("tool_name")
+        arguments = function_call_data.get("tool_input", {})
+
+        if not isinstance(arguments, dict):
+            logging.error("Invalid arguments format.")
+            return "Invalid arguments format."
+
+        logging.info(f"Executing function: {function_name} with arguments: {arguments}")
+
+        # if function_name == "web_search":
+        #     return self._handle_web_search(arguments)
+        # elif function_name == "general_ai":
+        #     return self._handle_general_ai(arguments)
+        # else:
+        #     return f"Function '{function_name}' is not implemented."
+
+    # def _handle_web_search(self, arguments: dict) -> str:
+    #     query = arguments.get("query")
+    #     if not query:
+    #         return "Please provide a search query."
+
+    #     search_results = self.webs.text(query, max_results=3)
+    #     formatted_results = "\n\n".join(
+    #         f"{i+1}. {result['title']}\n{result['body']}\nURL: {result['href']}"
+    #         for i, result in enumerate(search_results)
+    #     )
+    #     return f"Here's what I found:\n\n{formatted_results}"
+
+    # def _handle_general_ai(self, arguments: dict) -> str:
+    #     question = arguments.get("question")
+    #     if not question:
+    #         return "Please provide a question for the AI to answer."
+
+    #     response = self.LLAMA3.chat(question)
+    #     return response
+
+# Example usage
+if __name__ == "__main__":
+    tools = [
+        {
+            "type": "function",
+            "function": {
+                "name": "web_search",
+                "description": "Search query on Google",
+                "parameters": {
+                    "type": "object",
+                    "properties": {
+                        "query": {
+                            "type": "string",
+                            "description": "web search query"
+                        }
+                    },
+                    "required": ["query"]
+                }
+            }
+        },
+        {
+            "type": "function",
+            "function": {
+                "name": "general_ai",
+                "description": "Use AI to answer a general question",
+                "parameters": {
+                    "type": "object",
+                    "properties": {
+                        "question": {
+                            "type": "string",
+                            "description": "The question to be answered by the AI"
+                        }
+                    },
+                    "required": ["question"]
+                }
+            }
+        }
+    ]
+
+    agent = FunctionCallingAgent(tools=tools)
+    message = "open yt"
+    function_call_data = agent.function_call_handler(message)
+    print(f"Function Call Data: {function_call_data}")
+
+    if "error" not in function_call_data:
+        result = agent.execute_function(function_call_data)
+        print(f"Function Execution Result: {result}")
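
Note: in this release the dispatch body of `execute_function` and both `_handle_*` helpers ship commented out, so `execute_function` logs the call and then implicitly returns `None`; the `__main__` example will therefore print `Function Execution Result: None`. A hypothetical caller-side dispatch, reconstructed from the commented-out block above (webscout 5.1 as published does not execute any tool here):

```python
# Hypothetical dispatch mirroring the commented-out block in functioncall.py;
# the published 5.1 code does NOT run any of this.
def dispatch(agent, call: dict) -> str:
    name = call.get("tool_name")
    args = call.get("tool_input", {})
    if name == "web_search":
        results = agent.webs.text(args.get("query", ""), max_results=3)
        return "\n\n".join(
            f"{i+1}. {r['title']}\n{r['body']}\nURL: {r['href']}"
            for i, r in enumerate(results)
        )
    if name == "general_ai":
        return agent.LLAMA3.chat(args.get("question", ""))
    return f"Function '{name}' is not implemented."
```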

webscout-5.1/webscout/Extra/autollama.py (new file)

@@ -0,0 +1,179 @@
+import warnings
+from datetime import time
+import os
+import sys
+import subprocess
+import logging
+import psutil
+from huggingface_hub import hf_hub_url, cached_download
+import colorlog
+import ollama  # Import ollama for interactive chat
+import argparse  # Import argparse for command-line arguments
+
+# Suppress specific warnings
+warnings.filterwarnings(
+    "ignore", category=FutureWarning, module="huggingface_hub.file_download"
+)
+
+# Configure logging with colors
+handler = colorlog.StreamHandler()
+handler.setFormatter(
+    colorlog.ColoredFormatter(
+        "%(log_color)s%(asctime)s - %(levelname)s - %(message)s",
+        datefmt="%Y-%m-%d %H:%M:%S",
+        log_colors={
+            "DEBUG": "cyan",
+            "INFO": "green",
+            "WARNING": "yellow",
+            "ERROR": "red",
+            "CRITICAL": "red,bg_white",
+        },
+    )
+)
+
+logger = colorlog.getLogger(__name__)
+if not logger.hasHandlers():
+    logger.addHandler(handler)
+logger.setLevel(logging.INFO)
+
+# Redirect warnings to the logger but avoid duplication
+logging.captureWarnings(True)
+py_warnings_logger = logging.getLogger("py.warnings")
+if not py_warnings_logger.hasHandlers():
+    py_warnings_logger.addHandler(handler)
+
+
+def show_art():
+    logger.info("Made with love in India")
+
+
+def usage():
+    logger.info("Usage: python script.py -m <model_path> -g <gguf_file>")
+    logger.info("Options:")
+    logger.info("  -m <model_path>  Set the path to the model")
+    logger.info("  -g <gguf_file>   Set the GGUF file name")
+    logger.info("  -h               Display this help and exit")
+
+
+def is_model_downloaded(logging_name, download_log):
+    if not os.path.exists(download_log):
+        return False
+    with open(download_log, "r") as f:
+        for line in f:
+            if line.strip() == logging_name:
+                return True
+    return False
+
+
+def log_downloaded_model(logging_name, download_log):
+    with open(download_log, "a") as f:
+        f.write(logging_name + "\n")
+
+
+def is_model_created(model_name):
+    result = subprocess.run(["ollama", "list"], stdout=subprocess.PIPE)
+    return model_name in result.stdout.decode("utf-8")
+
+
+def download_model(repo_id, filename, token, cache_dir="downloads"):
+    url = hf_hub_url(repo_id, filename)
+    filepath = cached_download(
+        url, cache_dir=cache_dir, force_filename=filename, use_auth_token=token
+    )
+    return filepath
+
+
+def is_ollama_running():
+    for proc in psutil.process_iter(["name"]):
+        if proc.info["name"] in ["ollama", "ollama.exe"]:
+            return True
+    return False
+
+
+def main(model_path=None, gguf_file=None):  # Modified to handle both CLI and non-CLI
+    show_art()
+
+    # Parse command-line arguments if provided
+    parser = argparse.ArgumentParser(description="Download and create an Ollama model")
+    parser.add_argument("-m", "--model_path", help="Path to the model on Hugging Face Hub")
+    parser.add_argument("-g", "--gguf_file", help="Name of the GGUF file")
+    args = parser.parse_args()
+
+    # Use arguments from command line or function parameters
+    model_path = args.model_path if args.model_path else model_path
+    gguf_file = args.gguf_file if args.gguf_file else gguf_file
+
+    if not model_path or not gguf_file:
+        logger.error("Error: model_path and gguf_file are required.")
+        usage()
+        sys.exit(2)
+
+    model_name = gguf_file.split(".Q4")[0]
+    download_log = "downloaded_models.log"
+    logging_name = f"{model_path}_{model_name}"
+
+    # Ensure the log file exists
+    if not os.path.exists(download_log):
+        with open(download_log, 'w') as f:
+            pass
+
+    # Check if huggingface-hub is installed, and install it if not
+    try:
+        subprocess.check_output(['pip', 'show', 'huggingface-hub'])
+    except subprocess.CalledProcessError:
+        logger.info("Installing huggingface-hub...")
+        subprocess.check_call(['pip', 'install', '-U', 'huggingface_hub[cli]'])
+    else:
+        logger.info("huggingface-hub is already installed.")
+
+    # Check if the model has already been downloaded
+    if is_model_downloaded(logging_name, download_log):
+        logger.info(f"Model {logging_name} has already been downloaded. Skipping download.")
+    else:
+        logger.info(f"Downloading model {logging_name}...")
+        token = os.getenv('HUGGINGFACE_TOKEN', None)
+        if not token:
+            logger.warning("Warning: HUGGINGFACE_TOKEN environment variable is not set. Using None.")
+            token = None
+
+        filepath = download_model(model_path, gguf_file, token)
+        log_downloaded_model(logging_name, download_log)
+        logger.info(f"Model {logging_name} downloaded and logged.")
+
+    # Check if Ollama is installed, and install it if not
+    try:
+        subprocess.check_output(['ollama', '--version'])
+    except subprocess.CalledProcessError:
+        logger.info("Installing Ollama...")
+        subprocess.check_call(['curl', '-fsSL', 'https://ollama.com/install.sh', '|', 'sh'])
+    else:
+        logger.info("Ollama is already installed.")
+
+    # Check if Ollama is already running
+    if is_ollama_running():
+        logger.info("Ollama is already running. Skipping the start.")
+    else:
+        logger.info("Starting Ollama...")
+        subprocess.Popen(['ollama', 'serve'])
+
+        while not is_ollama_running():
+            logger.info("Waiting for Ollama to start...")
+            time.sleep(1)
+
+        logger.info("Ollama has started.")
+
+    # Check if the model has already been created
+    if is_model_created(model_name):
+        logger.info(f"Model {model_name} is already created. Skipping creation.")
+    else:
+        logger.info(f"Creating model {model_name}...")
+        with open('Modelfile', 'w') as f:
+            f.write(f"FROM ./downloads/{gguf_file}")
+        subprocess.check_call(['ollama', 'create', model_name, '-f', 'Modelfile'])
+        logger.info(f"Model {model_name} created.")
+
+    logger.info(f"model name is > {model_name}")
+    logger.info(f"Use Ollama run {model_name}")
+
+if __name__ == "__main__":
+    main()
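
Note: a few likely runtime issues in the new pure-Python autollama, consistent with the "potentially problematic" flag on this release: `from datetime import time` binds the `datetime.time` type rather than the `time` module, so `time.sleep(1)` in the wait loop raises `AttributeError`; passing `'|'` as a list element to `subprocess.check_call` does not build a shell pipeline, so `|` and `sh` are handed to `curl` as literal arguments; and `cached_download` is deprecated and removed in recent `huggingface_hub` releases, so the import can fail. A sketch of the conventional fixes (assuming a `huggingface_hub` version that provides `hf_hub_download`; repo and file names are placeholders):

```python
import time        # the time module, not datetime.time
import subprocess
from huggingface_hub import hf_hub_download  # maintained replacement for cached_download

# A shell pipeline requires shell=True (or download-then-execute):
subprocess.check_call("curl -fsSL https://ollama.com/install.sh | sh", shell=True)

# Polling with the real time module:
time.sleep(1)

# Fetching a GGUF file with the current API (placeholder repo/file names):
filepath = hf_hub_download(
    repo_id="some-user/some-model",
    filename="some-model.q4_k_m.gguf",
    local_dir="downloads",
)
```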

{webscout-4.9 → webscout-5.1}/webscout/Provider/DiscordRocks.py

@@ -122,7 +122,7 @@ class DiscordRocks(Provider):
 
     def __init__(
         self,
-        model: str = "gpt-4o",
+        model: str = "llama-3.1-405b-turbo",
         max_tokens: int = 4096,
         temperature: float = 1,
         top_p: float = 1,
@@ -176,6 +176,7 @@ class DiscordRocks(Provider):
             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
             "content-type": "application/json",
             "dnt": "1",
+            "authorization": "Bearer missing api key",
             "origin": "https://llmplayground.net",
             "priority": "u=1, i",
             "referer": "https://llmplayground.net/",

{webscout-4.9 → webscout-5.1}/webscout/Provider/Llama3.py

@@ -21,7 +21,7 @@ class LLAMA3(Provider):
         history_offset: int = 10250,
         act: str = None,
         model: str = "llama3-8b",
-        system: str = "Answer as concisely as possible.",
+        system: str = "GPT syle",
     ):
         """Instantiates Snova
 

{webscout-4.9 → webscout-5.1}/webscout/Provider/OLLAMA.py

@@ -36,11 +36,12 @@ class OLLAMA(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
+        system_prompt: str = "You are a helpful and friendly AI assistant.",
     ):
         """Instantiates Ollama
 
         Args:
-            model (str, optional): Model name. Defaults to 'llama2'.
+            model (str, optional): Model name. Defaults to 'qwen2:0.5b'.
             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
             timeout (int, optional): Http request timeout. Defaults to 30.
@@ -50,12 +51,14 @@ class OLLAMA(Provider):
             proxies (dict, optional): Http request proxies. Defaults to {}.
             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            system_prompt (str, optional): System prompt for Ollama. Defaults to "You are a helpful and friendly AI assistant.".
         """
         self.model = model
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.timeout = timeout
         self.last_response = {}
+        self.system_prompt = system_prompt
 
         self.__available_optimizers = (
             method
@@ -110,21 +113,19 @@ class OLLAMA(Provider):
         )
 
         def for_stream():
+            # Correctly call ollama.chat with stream=True
             stream = ollama.chat(model=self.model, messages=[
+                {'role': 'system', 'content': self.system_prompt},
                 {'role': 'user', 'content': conversation_prompt}
             ], stream=True)
 
-            message_load = ""
+            # Yield each chunk directly
             for chunk in stream:
-                message_load += chunk['message']['content']
-                yield chunk['message']['content'] if raw else dict(text=message_load)
-            self.last_response.update(dict(text=message_load))
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
+                yield chunk['message']['content'] if raw else dict(text=chunk['message']['content'])
 
         def for_non_stream():
             response = ollama.chat(model=self.model, messages=[
+                {'role': 'system', 'content': self.system_prompt},  # Add system message
                 {'role': 'user', 'content': conversation_prompt}
             ])
             self.last_response.update(dict(text=response['message']['content']))
@@ -183,6 +184,6 @@ class OLLAMA(Provider):
         return response["text"]
 if __name__ == "__main__":
     ollama_provider = OLLAMA(model="qwen:0.5b")
-    response = ollama_provider.chat("hi")
+    response = ollama_provider.chat("hi", stream=True)
     for r in response:
-        print(r, end="", flush=True)
+        print(r, end="", flush=True)
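
Note: 5.1 injects a system message on both paths and rewrites `for_stream` to yield each chunk individually instead of the accumulated text; as rewritten, the streaming path no longer updates `self.last_response` or the conversation history, so chat memory is maintained only on the non-streaming path. Consuming the new streaming behavior, as the module's own `__main__` block now does (any locally pulled Ollama model works in place of the one shown):

```python
# Under 5.1, each yielded item is a single chunk, not the accumulated message.
from webscout.Provider.OLLAMA import OLLAMA

provider = OLLAMA(model="qwen:0.5b")
for chunk in provider.chat("hi", stream=True):
    print(chunk, end="", flush=True)
```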

{webscout-4.9 → webscout-5.1/webscout.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 4.9
+Version: 5.1
 Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
 Author: OEvortex
 Author-email: helpingai5@gmail.com
@@ -66,6 +66,8 @@ Requires-Dist: requests_html
 Requires-Dist: bson
 Requires-Dist: cloudscraper
 Requires-Dist: emoji
+Requires-Dist: colorlog
+Requires-Dist: openai
 Provides-Extra: dev
 Requires-Dist: ruff>=0.1.6; extra == "dev"
 Requires-Dist: pytest>=7.4.2; extra == "dev"
@@ -1645,15 +1647,13 @@ gguf.convert(
 
 Webscout's `autollama` utility download model from huggingface and then automatically makes it ollama ready
 
-**Example:**
-
 ```python
 from webscout import autollama
 
-autollama(
-    model_path="OEvortex/HelpingAI-Lite-1.5T", # Hugging Face model ID
-    gguf_file="HelpingAI-Lite-1.5T.q4_k_m.gguf" # GGUF file ID
-)
+model_path = "Vortex4ai/Jarvis-0.5B"
+gguf_file = "test2-q4_k_m.gguf"
+
+autollama.main(model_path, gguf_file)
 ```
 
 **Command Line Usage:**

{webscout-4.9 → webscout-5.1}/webscout.egg-info/requires.txt

@@ -40,6 +40,8 @@ requests_html
 bson
 cloudscraper
 emoji
+colorlog
+openai
 
 [dev]
 ruff>=0.1.6

webscout-4.9/webscout/Agents/functioncall.py (deleted)

@@ -1,186 +0,0 @@
-import json
-import logging
-from webscout import DeepInfra, WEBS
-
-class FunctionCallingAgent:
-    def __init__(self, model: str = "Qwen/Qwen2-72B-Instruct",
-                 system_prompt: str = 'You are a helpful assistant that will always answer what the user wants',
-                 tools: list = None):
-        """
-        Initialize the FunctionCallingAgent with the model, system prompt, and tools.
-
-        Args:
-            model (str): The model to use for deepinfra chat.
-            system_prompt (str): The system prompt to initialize the model.
-            tools (list): A list of tools the agent can use.
-        """
-        self.deepinfra = DeepInfra(model=model, system_prompt=system_prompt, timeout=300)
-        self.tools = tools if tools is not None else []
-
-
-    def function_call_handler(self, message_text: str) -> dict:
-        """
-        Handles function calls based on the provided message text.
-
-        Args:
-            message_text (str): The input message text from the user.
-
-        Returns:
-            dict: The extracted function call and arguments.
-        """
-        system_message = self._generate_system_message(message_text)
-        response = self.deepinfra.chat(system_message)
-        # logging.info(f"Raw response: {response}")
-
-        return self._parse_function_call(response)
-
-    def _generate_system_message(self, user_message: str) -> str:
-        """
-        Generates a system message incorporating the user message and available tools.
-
-        Args:
-            user_message (str): The input message from the user.
-
-        Returns:
-            str: The formatted system message.
-        """
-        tools_description = '\n'.join([f"{tool['function']['name']}: {tool['function'].get('description', '')}" for tool in self.tools])
-        return (
-            f"[SYSTEM] You are a helpful and capable AI assistant. "
-            "Your goal is to understand the user's request and provide accurate and relevant information. "
-            "You have access to the following tools:\n\n"
-            f"{tools_description}\n\n"
-            "To use a tool, please follow this format:\n\n"
-            "```json\n"
-            "{{ 'tool_name': 'tool_name', 'tool_input': {{ 'arg_1': 'value_1', 'arg_2': 'value_2', ... }} }}\n"
-            "```\n\n"
-            f"[USER] {user_message}"
-        )
-
-    def _parse_function_call(self, response: str) -> dict:
-        """
-        Parses the response from the model to extract the function call.
-
-        Args:
-            response (str): The raw response from the model.
-
-        Returns:
-            dict: A dictionary containing the function name and arguments.
-        """
-        try:
-            # Find the JSON-like part of the response
-            start_idx = response.find("{")
-            end_idx = response.rfind("}") + 1
-
-            if start_idx == -1 or end_idx == -1:
-                raise ValueError("No valid JSON structure found in the response.")
-
-            response_json_str = response[start_idx:end_idx]
-
-            # Replace single quotes with double quotes and remove extra braces
-            response_json_str = response_json_str.replace("'", '"')
-            response_json_str = response_json_str.replace("{{", "{").replace("}}", "}")
-
-            # Remove any leading or trailing whitespace
-            response_json_str = response_json_str.strip()
-
-            # Attempt to load the JSON string
-            return json.loads(response_json_str)
-
-        except (ValueError, json.JSONDecodeError) as e:
-            logging.error(f"Error parsing function call: {e}")
-            return {"error": str(e)}
-
-    def execute_function(self, function_call_data: dict) -> str:
-        """
-        Executes the specified function with the provided arguments.
-
-        Args:
-            function_call_data (dict): A dictionary containing the function name and arguments.
-
-        Returns:
-            str: The result of the function execution.
-        """
-        function_name = function_call_data.get("tool_name")  # Use 'tool_name' instead of 'name'
-        arguments = function_call_data.get("tool_input", {})  # Use 'tool_input' instead of 'arguments'
-
-        if not isinstance(arguments, dict):
-            logging.error("Invalid arguments format.")
-            return "Invalid arguments format."
-
-        logging.info(f"Executing function: {function_name} with arguments: {arguments}")
-
-        if function_name == "web_search":
-            return self._handle_web_search(arguments)
-        else:
-            return f"Function '{function_name}' is not implemented."
-
-    # def _handle_web_search(self, arguments: dict) -> str:
-    #     """
-    #     Handles web search queries using the WEBS tool.
-
-    #     Args:
-    #         arguments (dict): A dictionary containing the query argument.
-
-    #     Returns:
-    #         str: The result of the web search.
-    #     """
-    #     query = arguments.get("query")
-    #     if not query:
-    #         return "Please provide a search query."
-
-    #     search_results = self.webs.text(query)
-    #     # Additional processing of search results can be done here if needed.
-    #     return f"Here's what I found:\n\n{search_results}"
-
-# Example usage
-if __name__ == "__main__":
-    tools = [
-        {
-            "type": "function",
-            "function": {
-                "name": "UserDetail",
-                "parameters": {
-                    "type": "object",
-                    "title": "UserDetail",
-                    "properties": {
-                        "name": {
-                            "title": "Name",
-                            "type": "string"
-                        },
-                        "age": {
-                            "title": "Age",
-                            "type": "integer"
-                        }
-                    },
-                    "required": ["name", "age"]
-                }
-            }
-        },
-        {
-            "type": "function",
-            "function": {
-                "name": "web_search",
-                "description": "Search query on google",
-                "parameters": {
-                    "type": "object",
-                    "properties": {
-                        "query": {
-                            "type": "string",
-                            "description": "web search query"
-                        }
-                    },
-                    "required": ["query"]
-                }
-            }
-        }
-    ]
-
-    agent = FunctionCallingAgent(tools=tools)
-    message = "websearch about helpingai-9b"
-    function_call_data = agent.function_call_handler(message)
-    print(f"Function Call Data: {function_call_data}")
-
-    if "error" not in function_call_data:
-        result = agent.execute_function(function_call_data)
-        print(f"Function Execution Result: {result}")

webscout-4.9/webscout/Extra/autollama.py (deleted)

@@ -1,209 +0,0 @@
-import subprocess
-import argparse
-import os
-from rich.console import Console
-from rich.panel import Panel
-from rich.progress import track
-from yaspin import yaspin
-from pyfiglet import figlet_format
-import time
-
-console = Console()
-
-def autollama(model_path, gguf_file):
-    """Manages models with Ollama using the autollama.sh script.
-
-    Args:
-        model_path (str): The path to the Hugging Face model.
-        gguf_file (str): The name of the GGUF file.
-    """
-    console.print(f"[bold green]{figlet_format('Autollama')}[/]\n", justify="center")
-
-    # Check if autollama.sh exists in the current working directory
-    script_path = os.path.join(os.getcwd(), "autollama.sh")
-    if not os.path.exists(script_path):
-        # Create autollama.sh with the content provided
-        with open(script_path, "w") as f:
-            f.write("""
-function show_art() {
-    cat << "EOF"
-Made with love in India
-EOF
-}
-
-show_art
-
-# Initialize default values
-MODEL_PATH=""
-GGUF_FILE=""
-
-# Display help/usage information
-usage() {
-    echo "Usage: $0 -m <model_path> -g <gguf_file>"
-    echo
-    echo "Options:"
-    echo "  -m <model_path>  Set the path to the model"
-    echo "  -g <gguf_file>   Set the GGUF file name"
-    echo "  -h               Display this help and exit"
-    echo
-}
-
-# Parse command-line options
-while getopts ":m:g:h" opt; do
-    case ${opt} in
-        m )
-            MODEL_PATH=$OPTARG
-            ;;
-        g )
-            GGUF_FILE=$OPTARG
-            ;;
-        h )
-            usage
-            exit 0
-            ;;
-        \? )
-            echo "Invalid Option: -$OPTARG" 1>&2
-            usage
-            exit 1
-            ;;
-        : )
-            echo "Invalid Option: -$OPTARG requires an argument" 1>&2
-            usage
-            exit 1
-            ;;
-    esac
-done
-
-# Check required parameters
-if [ -z "$MODEL_PATH" ] || [ -z "$GGUF_FILE" ]; then
-    echo "Error: -m (model_path) and -g (gguf_file) are required."
-    usage
-    exit 1
-fi
-
-# Derive MODEL_NAME
-MODEL_NAME=$(echo $GGUF_FILE | sed 's/\(.*\)\.Q4.*/\\1/')
-
-# Log file where downloaded models are recorded
-DOWNLOAD_LOG="downloaded_models.log"
-
-# Composite logging name
-LOGGING_NAME="${MODEL_PATH}_${MODEL_NAME}"
-
-# Check if the model has been downloaded
-function is_model_downloaded {
-    grep -qxF "$LOGGING_NAME" "$DOWNLOAD_LOG" && return 0 || return 1
-}
-
-# Log the downloaded model
-function log_downloaded_model {
-    echo "$LOGGING_NAME" >> "$DOWNLOAD_LOG"
-}
-
-# Function to check if the model has already been created
-function is_model_created {
-    # 'ollama list' lists all models
-    ollama list | grep -q "$MODEL_NAME" && return 0 || return 1
-}
-
-# Check if huggingface-hub is installed, and install it if not
-if ! pip show huggingface-hub > /dev/null; then
-    echo "Installing huggingface-hub..."
-    pip install -U "huggingface_hub[cli]"
-else
-    echo "huggingface-hub is already installed."
-fi
-
-# Check if the model has already been downloaded
-if is_model_downloaded; then
-    echo "Model $LOGGING_NAME has already been downloaded. Skipping download."
-else
-    echo "Downloading model $LOGGING_NAME..."
-    # Download the model
-    huggingface-cli download $MODEL_PATH $GGUF_FILE --local-dir downloads --local-dir-use-symlinks False
-
-    # Log the downloaded model
-    log_downloaded_model
-    echo "Model $LOGGING_NAME downloaded and logged."
-fi
-
-# Check if Ollama is installed, and install it if not
-if ! command -v ollama &> /dev/null; then
-    echo "Installing Ollama..."
-    curl -fsSL https://ollama.com/install.sh | sh
-else
-    echo "Ollama is already installed."
-fi
-
-# Check if Ollama is already running
-if pgrep -f 'ollama serve' > /dev/null; then
-    echo "Ollama is already running. Skipping the start."
-else
-    echo "Starting Ollama..."
-    # Start Ollama in the background
-    ollama serve &
-
-    # Wait for Ollama to start
-    while true; do
-        if pgrep -f 'ollama serve' > /dev/null; then
-            echo "Ollama has started."
-            sleep 60
-            break
-        else
-            echo "Waiting for Ollama to start..."
-            sleep 1  # Wait for 1 second before checking again
-        fi
-    done
-fi
-
-# Check if the model has already been created
-if is_model_created; then
-    echo "Model $MODEL_NAME is already created. Skipping creation."
-else
-    echo "Creating model $MODEL_NAME..."
-    # Create the model in Ollama
-    # Prepare Modelfile with the downloaded path
-    echo "FROM ./downloads/$GGUF_FILE" > Modelfile
-    ollama create $MODEL_NAME -f Modelfile
-    echo "Model $MODEL_NAME created."
-fi
-
-
-echo "model name is > $MODEL_NAME"
-echo "Use Ollama run $MODEL_NAME"
-""")
-    # Make autollama.sh executable (using chmod)
-    os.chmod(script_path, 0o755)
-
-    # Initialize command list
-    command = ["bash", script_path, "-m", model_path, "-g", gguf_file]
-
-    # Execute the command
-    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
-
-    for line in iter(process.stdout.readline, ''):
-        console.print(Panel(line.strip(), title="Autollama Output", expand=False))
-
-    for line in iter(process.stderr.readline, ''):
-        console.print(Panel(line.strip(), title="Autollama Errors (if any)", expand=False))
-
-    process.wait()
-    console.print("[green]Model is ready![/]")
-
-def main():
-    parser = argparse.ArgumentParser(description='Automatically create and run an Ollama model in Ollama')
-    parser.add_argument('-m', '--model_path', required=True, help='Set the huggingface model id to the Hugging Face model')
-    parser.add_argument('-g', '--gguf_file', required=True, help='Set the GGUF file name')
-    args = parser.parse_args()
-
-    try:
-        with yaspin(text="Processing...") as spinner:
-            autollama(args.model_path, args.gguf_file)
-        spinner.ok("Done!")
-    except Exception as e:
-        console.print(f"[red]Error: {e}[/]")
-        exit(1)
-
-if __name__ == "__main__":
-    main()
-