webscout 4.9-py3-none-any.whl → 5.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/Agents/functioncall.py +58 -102
- webscout/Extra/autollama.py +173 -203
- webscout/Provider/DiscordRocks.py +2 -1
- webscout/Provider/Llama3.py +1 -1
- webscout/Provider/OLLAMA.py +11 -10
- {webscout-4.9.dist-info → webscout-5.1.dist-info}/METADATA +7 -7
- {webscout-4.9.dist-info → webscout-5.1.dist-info}/RECORD +11 -11
- {webscout-4.9.dist-info → webscout-5.1.dist-info}/LICENSE.md +0 -0
- {webscout-4.9.dist-info → webscout-5.1.dist-info}/WHEEL +0 -0
- {webscout-4.9.dist-info → webscout-5.1.dist-info}/entry_points.txt +0 -0
- {webscout-4.9.dist-info → webscout-5.1.dist-info}/top_level.txt +0 -0
webscout/Agents/functioncall.py
CHANGED
@@ -1,72 +1,44 @@
 import json
 import logging
-from webscout import 
+from webscout import LLAMA3, WEBS

 class FunctionCallingAgent:
-    def __init__(self, model: str = "
+    def __init__(self, model: str = "llama3-8b",
                  system_prompt: str = 'You are a helpful assistant that will always answer what the user wants',
                  tools: list = None):
-        """
-        Initialize the FunctionCallingAgent with the model, system prompt, and tools.
-
-        Args:
-            model (str): The model to use for deepinfra chat.
-            system_prompt (str): The system prompt to initialize the model.
-            tools (list): A list of tools the agent can use.
-        """
-        self.deepinfra = DeepInfra(model=model, system_prompt=system_prompt, timeout=300)
+        self.LLAMA3 = LLAMA3(model=model, system=system_prompt, timeout=300)
         self.tools = tools if tools is not None else []
-
+        self.webs = WEBS()

     def function_call_handler(self, message_text: str) -> dict:
-        """
-        Handles function calls based on the provided message text.
-
-        Args:
-            message_text (str): The input message text from the user.
-
-        Returns:
-            dict: The extracted function call and arguments.
-        """
         system_message = self._generate_system_message(message_text)
-        response = self.
+        response = self.LLAMA3.chat(system_message)
         # logging.info(f"Raw response: {response}")
-
         return self._parse_function_call(response)

     def _generate_system_message(self, user_message: str) -> str:
-        """
-        Generates a system message incorporating the user message and available tools.
-
-        Args:
-            user_message (str): The input message from the user.
-
-        Returns:
-            str: The formatted system message.
-        """
-        tools_description = '\n'.join([f"{tool['function']['name']}: {tool['function'].get('description', '')}" for tool in self.tools])
+        tools_description = '\n'.join([f"- {tool['function']['name']}: {tool['function'].get('description', '')}" for tool in self.tools])
         return (
-
-            "
-            "
-
-            "
-            "
-            "
-            "
-
+            "You are an AI assistant capable of understanding user requests and using tools to fulfill them. "
+            "Always respond using the JSON format specified below, even if you're not sure about the answer. "
+            f"Available tools:\n{tools_description}\n\n"
+            "Instructions:\n"
+            "1. Analyze the user's request.\n"
+            "2. Choose the most appropriate tool based on the request.\n"
+            "3. Respond ONLY with a JSON object in this exact format:\n"
+            "{\n"
+            '    "tool_name": "name_of_the_tool",\n'
+            '    "tool_input": {\n'
+            '        "param1": "value1",\n'
+            '        "param2": "value2"\n'
+            "    }\n"
+            "}\n\n"
+            "If you can't determine a suitable tool, use the 'general_ai' tool with the user's message as the 'question' parameter.\n\n"
+            f"User request: {user_message}\n\n"
+            "Your response (in JSON format):"
         )

     def _parse_function_call(self, response: str) -> dict:
-        """
-        Parses the response from the model to extract the function call.
-
-        Args:
-            response (str): The raw response from the model.
-
-        Returns:
-            dict: A dictionary containing the function name and arguments.
-        """
         try:
             # Find the JSON-like part of the response
             start_idx = response.find("{")
@@ -76,14 +48,7 @@ class FunctionCallingAgent:
                 raise ValueError("No valid JSON structure found in the response.")

             response_json_str = response[start_idx:end_idx]
-
-            # Replace single quotes with double quotes and remove extra braces
-            response_json_str = response_json_str.replace("'", '"')
-            response_json_str = response_json_str.replace("{{", "{").replace("}}", "}")

-            # Remove any leading or trailing whitespace
-            response_json_str = response_json_str.strip()
-
             # Attempt to load the JSON string
             return json.loads(response_json_str)

@@ -92,17 +57,8 @@ class FunctionCallingAgent:
             return {"error": str(e)}

     def execute_function(self, function_call_data: dict) -> str:
-        """
-
-
-        Args:
-            function_call_data (dict): A dictionary containing the function name and arguments.
-
-        Returns:
-            str: The result of the function execution.
-        """
-        function_name = function_call_data.get("tool_name")  # Use 'tool_name' instead of 'name'
-        arguments = function_call_data.get("tool_input", {})  # Use 'tool_input' instead of 'arguments'
+        function_name = function_call_data.get("tool_name")
+        arguments = function_call_data.get("tool_input", {})

         if not isinstance(arguments, dict):
             logging.error("Invalid arguments format.")
@@ -110,28 +66,32 @@ class FunctionCallingAgent:

         logging.info(f"Executing function: {function_name} with arguments: {arguments}")

-
-
-
-
+        # if function_name == "web_search":
+        #     return self._handle_web_search(arguments)
+        # elif function_name == "general_ai":
+        #     return self._handle_general_ai(arguments)
+        # else:
+        #     return f"Function '{function_name}' is not implemented."

     # def _handle_web_search(self, arguments: dict) -> str:
-    #     """
-    #     Handles web search queries using the WEBS tool.
-
-    #     Args:
-    #         arguments (dict): A dictionary containing the query argument.
-
-    #     Returns:
-    #         str: The result of the web search.
-    #     """
     #     query = arguments.get("query")
     #     if not query:
     #         return "Please provide a search query."

-    #     search_results = self.webs.text(query)
-    #
-    #
+    #     search_results = self.webs.text(query, max_results=3)
+    #     formatted_results = "\n\n".join(
+    #         f"{i+1}. {result['title']}\n{result['body']}\nURL: {result['href']}"
+    #         for i, result in enumerate(search_results)
+    #     )
+    #     return f"Here's what I found:\n\n{formatted_results}"
+
+    # def _handle_general_ai(self, arguments: dict) -> str:
+    #     question = arguments.get("question")
+    #     if not question:
+    #         return "Please provide a question for the AI to answer."
+
+    #     response = self.LLAMA3.chat(question)
+    #     return response

 # Example usage
 if __name__ == "__main__":
@@ -139,48 +99,44 @@ if __name__ == "__main__":
         {
             "type": "function",
             "function": {
-                "name": "
+                "name": "web_search",
+                "description": "Search query on Google",
                 "parameters": {
                     "type": "object",
-                    "title": "UserDetail",
                     "properties": {
-                        "
-                        "
-                        "
-                        },
-                        "age": {
-                            "title": "Age",
-                            "type": "integer"
+                        "query": {
+                            "type": "string",
+                            "description": "web search query"
                         }
                     },
-                    "required": ["
+                    "required": ["query"]
                 }
             }
         },
         {
             "type": "function",
             "function": {
-                "name": "
-                "description": "
+                "name": "general_ai",
+                "description": "Use AI to answer a general question",
                 "parameters": {
                     "type": "object",
                     "properties": {
-                        "
+                        "question": {
                             "type": "string",
-                            "description": "
+                            "description": "The question to be answered by the AI"
                         }
                     },
-                    "required": ["
+                    "required": ["question"]
                 }
             }
         }
     ]

     agent = FunctionCallingAgent(tools=tools)
-    message = "
+    message = "open yt"
     function_call_data = agent.function_call_handler(message)
     print(f"Function Call Data: {function_call_data}")

     if "error" not in function_call_data:
         result = agent.execute_function(function_call_data)
-        print(f"Function Execution Result: {result}")
+        print(f"Function Execution Result: {result}")
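The practical consequence of this rewrite is that `_parse_function_call` became strict: the single-quote and double-brace fixups are gone, so the slice between the braces must already be valid JSON. Below is a minimal standalone sketch of that extraction step; the helper name is illustrative, and since the `end_idx` computation is truncated in this view, `response.rfind("}") + 1` is an assumption consistent with the surrounding code:

```python
import json

def parse_function_call(response: str) -> dict:
    # Treat everything from the first '{' to the last '}' as the tool call.
    start_idx = response.find("{")
    end_idx = response.rfind("}") + 1  # assumed; this line is truncated in the diff view
    if start_idx == -1 or end_idx == 0:
        raise ValueError("No valid JSON structure found in the response.")
    # 5.1 feeds the slice straight to json.loads with no quote normalization,
    # so the model must emit strictly valid JSON.
    return json.loads(response[start_idx:end_idx])

reply = 'Sure! {"tool_name": "web_search", "tool_input": {"query": "open yt"}}'
print(parse_function_call(reply))
# {'tool_name': 'web_search', 'tool_input': {'query': 'open yt'}}
```

A reply using single quotes, e.g. `{'tool_name': 'web_search'}`, would have been repaired by the 4.9 code but now fails `json.loads`, which `_parse_function_call` catches and returns as `{"error": ...}`.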
webscout/Extra/autollama.py
CHANGED
@@ -1,209 +1,179 @@
-import 
-import 
+import warnings
+from datetime import time
 import os
-
-
-
-
-from 
-import 
-[… lines 10-99 of the old file were not captured in this diff view …]
-    echo "$LOGGING_NAME" >> "$DOWNLOAD_LOG"
-}
-
-# Function to check if the model has already been created
-function is_model_created {
-    # 'ollama list' lists all models
-    ollama list | grep -q "$MODEL_NAME" && return 0 || return 1
-}
-
-# Check if huggingface-hub is installed, and install it if not
-if ! pip show huggingface-hub > /dev/null; then
-    echo "Installing huggingface-hub..."
-    pip install -U "huggingface_hub[cli]"
-else
-    echo "huggingface-hub is already installed."
-fi
-
-# Check if the model has already been downloaded
-if is_model_downloaded; then
-    echo "Model $LOGGING_NAME has already been downloaded. Skipping download."
-else
-    echo "Downloading model $LOGGING_NAME..."
-    # Download the model
-    huggingface-cli download $MODEL_PATH $GGUF_FILE --local-dir downloads --local-dir-use-symlinks False
-
-    # Log the downloaded model
-    log_downloaded_model
-    echo "Model $LOGGING_NAME downloaded and logged."
-fi
-
-# Check if Ollama is installed, and install it if not
-if ! command -v ollama &> /dev/null; then
-    echo "Installing Ollama..."
-    curl -fsSL https://ollama.com/install.sh | sh
-else
-    echo "Ollama is already installed."
-fi
-
-# Check if Ollama is already running
-if pgrep -f 'ollama serve' > /dev/null; then
-    echo "Ollama is already running. Skipping the start."
-else
-    echo "Starting Ollama..."
-    # Start Ollama in the background
-    ollama serve &
-
-    # Wait for Ollama to start
-    while true; do
-        if pgrep -f 'ollama serve' > /dev/null; then
-            echo "Ollama has started."
-            sleep 60
-            break
-        else
-            echo "Waiting for Ollama to start..."
-            sleep 1 # Wait for 1 second before checking again
-        fi
-    done
-fi
-
-# Check if the model has already been created
-if is_model_created; then
-    echo "Model $MODEL_NAME is already created. Skipping creation."
-else
-    echo "Creating model $MODEL_NAME..."
-    # Create the model in Ollama
-    # Prepare Modelfile with the downloaded path
-    echo "FROM ./downloads/$GGUF_FILE" > Modelfile
-    ollama create $MODEL_NAME -f Modelfile
-    echo "Model $MODEL_NAME created."
-fi
-
-
-echo "model name is > $MODEL_NAME"
-echo "Use Ollama run $MODEL_NAME"
-""")
-    # Make autollama.sh executable (using chmod)
-    os.chmod(script_path, 0o755)
-
-    # Initialize command list
-    command = ["bash", script_path, "-m", model_path, "-g", gguf_file]
-
-    # Execute the command
-    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
-
-    for line in iter(process.stdout.readline, ''):
-        console.print(Panel(line.strip(), title="Autollama Output", expand=False))
-
-    for line in iter(process.stderr.readline, ''):
-        console.print(Panel(line.strip(), title="Autollama Errors (if any)", expand=False))
-
-    process.wait()
-    console.print("[green]Model is ready![/]")
-
-def main():
-    parser = argparse.ArgumentParser(description='Automatically create and run an Ollama model in Ollama')
-    parser.add_argument('-m', '--model_path', required=True, help='Set the huggingface model id to the Hugging Face model')
-    parser.add_argument('-g', '--gguf_file', required=True, help='Set the GGUF file name')
+import sys
+import subprocess
+import logging
+import psutil
+from huggingface_hub import hf_hub_url, cached_download
+import colorlog
+import ollama  # Import ollama for interactive chat
+import argparse  # Import argparse for command-line arguments
+
+# Suppress specific warnings
+warnings.filterwarnings(
+    "ignore", category=FutureWarning, module="huggingface_hub.file_download"
+)
+
+# Configure logging with colors
+handler = colorlog.StreamHandler()
+handler.setFormatter(
+    colorlog.ColoredFormatter(
+        "%(log_color)s%(asctime)s - %(levelname)s - %(message)s",
+        datefmt="%Y-%m-%d %H:%M:%S",
+        log_colors={
+            "DEBUG": "cyan",
+            "INFO": "green",
+            "WARNING": "yellow",
+            "ERROR": "red",
+            "CRITICAL": "red,bg_white",
+        },
+    )
+)
+
+logger = colorlog.getLogger(__name__)
+if not logger.hasHandlers():
+    logger.addHandler(handler)
+logger.setLevel(logging.INFO)
+
+# Redirect warnings to the logger but avoid duplication
+logging.captureWarnings(True)
+py_warnings_logger = logging.getLogger("py.warnings")
+if not py_warnings_logger.hasHandlers():
+    py_warnings_logger.addHandler(handler)
+
+
+def show_art():
+    logger.info("Made with love in India")
+
+
+def usage():
+    logger.info("Usage: python script.py -m <model_path> -g <gguf_file>")
+    logger.info("Options:")
+    logger.info("  -m <model_path>  Set the path to the model")
+    logger.info("  -g <gguf_file>   Set the GGUF file name")
+    logger.info("  -h               Display this help and exit")
+
+
+def is_model_downloaded(logging_name, download_log):
+    if not os.path.exists(download_log):
+        return False
+    with open(download_log, "r") as f:
+        for line in f:
+            if line.strip() == logging_name:
+                return True
+    return False
+
+
+def log_downloaded_model(logging_name, download_log):
+    with open(download_log, "a") as f:
+        f.write(logging_name + "\n")
+
+
+def is_model_created(model_name):
+    result = subprocess.run(["ollama", "list"], stdout=subprocess.PIPE)
+    return model_name in result.stdout.decode("utf-8")
+
+
+def download_model(repo_id, filename, token, cache_dir="downloads"):
+    url = hf_hub_url(repo_id, filename)
+    filepath = cached_download(
+        url, cache_dir=cache_dir, force_filename=filename, use_auth_token=token
+    )
+    return filepath
+
+
+def is_ollama_running():
+    for proc in psutil.process_iter(["name"]):
+        if proc.info["name"] in ["ollama", "ollama.exe"]:
+            return True
+    return False
+
+
+def main(model_path=None, gguf_file=None):  # Modified to handle both CLI and non-CLI
+    show_art()
+
+    # Parse command-line arguments if provided
+    parser = argparse.ArgumentParser(description="Download and create an Ollama model")
+    parser.add_argument("-m", "--model_path", help="Path to the model on Hugging Face Hub")
+    parser.add_argument("-g", "--gguf_file", help="Name of the GGUF file")
     args = parser.parse_args()

+    # Use arguments from command line or function parameters
+    model_path = args.model_path if args.model_path else model_path
+    gguf_file = args.gguf_file if args.gguf_file else gguf_file
+
+    if not model_path or not gguf_file:
+        logger.error("Error: model_path and gguf_file are required.")
+        usage()
+        sys.exit(2)
+
+    model_name = gguf_file.split(".Q4")[0]
+    download_log = "downloaded_models.log"
+    logging_name = f"{model_path}_{model_name}"
+
+    # Ensure the log file exists
+    if not os.path.exists(download_log):
+        with open(download_log, 'w') as f:
+            pass
+
+    # Check if huggingface-hub is installed, and install it if not
     try:
-
-
-
-
-
-
+        subprocess.check_output(['pip', 'show', 'huggingface-hub'])
+    except subprocess.CalledProcessError:
+        logger.info("Installing huggingface-hub...")
+        subprocess.check_call(['pip', 'install', '-U', 'huggingface_hub[cli]'])
+    else:
+        logger.info("huggingface-hub is already installed.")
+
+    # Check if the model has already been downloaded
+    if is_model_downloaded(logging_name, download_log):
+        logger.info(f"Model {logging_name} has already been downloaded. Skipping download.")
+    else:
+        logger.info(f"Downloading model {logging_name}...")
+        token = os.getenv('HUGGINGFACE_TOKEN', None)
+        if not token:
+            logger.warning("Warning: HUGGINGFACE_TOKEN environment variable is not set. Using None.")
+            token = None
+
+        filepath = download_model(model_path, gguf_file, token)
+        log_downloaded_model(logging_name, download_log)
+        logger.info(f"Model {logging_name} downloaded and logged.")
+
+    # Check if Ollama is installed, and install it if not
+    try:
+        subprocess.check_output(['ollama', '--version'])
+    except subprocess.CalledProcessError:
+        logger.info("Installing Ollama...")
+        subprocess.check_call(['curl', '-fsSL', 'https://ollama.com/install.sh', '|', 'sh'])
+    else:
+        logger.info("Ollama is already installed.")
+
+    # Check if Ollama is already running
+    if is_ollama_running():
+        logger.info("Ollama is already running. Skipping the start.")
+    else:
+        logger.info("Starting Ollama...")
+        subprocess.Popen(['ollama', 'serve'])
+
+        while not is_ollama_running():
+            logger.info("Waiting for Ollama to start...")
+            time.sleep(1)
+
+        logger.info("Ollama has started.")
+
+    # Check if the model has already been created
+    if is_model_created(model_name):
+        logger.info(f"Model {model_name} is already created. Skipping creation.")
+    else:
+        logger.info(f"Creating model {model_name}...")
+        with open('Modelfile', 'w') as f:
+            f.write(f"FROM ./downloads/{gguf_file}")
+        subprocess.check_call(['ollama', 'create', model_name, '-f', 'Modelfile'])
+        logger.info(f"Model {model_name} created.")
+
+    logger.info(f"model name is > {model_name}")
+    logger.info(f"Use Ollama run {model_name}")

 if __name__ == "__main__":
-    main()
-
+    main()
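The net effect of this rewrite is that the generated `autollama.sh` shell script is gone: downloading, download logging, Ollama startup, and `ollama create` now run as plain Python, and `main()` accepts parameters directly as well as `-m`/`-g` flags. A usage sketch mirroring the README example that appears later in this diff (the repo id and file name are the README's sample values, not recommendations):

```python
from webscout import autollama

model_path = "Vortex4ai/Jarvis-0.5B"  # Hugging Face repo id (README sample value)
gguf_file = "test2-q4_k_m.gguf"       # GGUF file within that repo (README sample value)

# main() still calls parser.parse_args(), so -m/-g flags given on the
# command line take precedence over these arguments.
autollama.main(model_path, gguf_file)
```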
webscout/Provider/DiscordRocks.py
CHANGED
@@ -122,7 +122,7 @@ class DiscordRocks(Provider):

     def __init__(
         self,
-        model: str = "
+        model: str = "llama-3.1-405b-turbo",
         max_tokens: int = 4096,
         temperature: float = 1,
         top_p: float = 1,
@@ -176,6 +176,7 @@ class DiscordRocks(Provider):
             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
             "content-type": "application/json",
             "dnt": "1",
+            "authorization": "Bearer missing api key",
             "origin": "https://llmplayground.net",
             "priority": "u=1, i",
             "referer": "https://llmplayground.net/",
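For what the new default means in practice, here is a short sketch; the import path follows the module location shown above, but the `chat` call is an assumption, since this diff shows only `__init__` and the request headers:

```python
from webscout.Provider.DiscordRocks import DiscordRocks

# 5.1 defaults to model="llama-3.1-405b-turbo"; note that every request now
# also carries the hardcoded "authorization: Bearer missing api key" header.
ai = DiscordRocks()
print(ai.chat("Hello"))  # assumed Provider-style chat() interface
```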
webscout/Provider/Llama3.py
CHANGED
webscout/Provider/OLLAMA.py
CHANGED
@@ -36,11 +36,12 @@ class OLLAMA(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
+        system_prompt: str = "You are a helpful and friendly AI assistant.",
     ):
         """Instantiates Ollama

         Args:
-            model (str, optional): Model name. Defaults to '
+            model (str, optional): Model name. Defaults to 'qwen2:0.5b'.
             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
             timeout (int, optional): Http request timeout. Defaults to 30.
@@ -50,12 +51,14 @@ class OLLAMA(Provider):
             proxies (dict, optional): Http request proxies. Defaults to {}.
             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            system_prompt (str, optional): System prompt for Ollama. Defaults to "You are a helpful and friendly AI assistant.".
         """
         self.model = model
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.timeout = timeout
         self.last_response = {}
+        self.system_prompt = system_prompt

         self.__available_optimizers = (
             method
@@ -110,21 +113,19 @@ class OLLAMA(Provider):
         )

         def for_stream():
+            # Correctly call ollama.chat with stream=True
             stream = ollama.chat(model=self.model, messages=[
+                {'role': 'system', 'content': self.system_prompt},
                 {'role': 'user', 'content': conversation_prompt}
             ], stream=True)

-
+            # Yield each chunk directly
             for chunk in stream:
-
-                yield chunk['message']['content'] if raw else dict(text=message_load)
-            self.last_response.update(dict(text=message_load))
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
+                yield chunk['message']['content'] if raw else dict(text=chunk['message']['content'])

         def for_non_stream():
             response = ollama.chat(model=self.model, messages=[
+                {'role': 'system', 'content': self.system_prompt},  # Add system message
                 {'role': 'user', 'content': conversation_prompt}
             ])
             self.last_response.update(dict(text=response['message']['content']))
@@ -183,6 +184,6 @@ class OLLAMA(Provider):
         return response["text"]
 if __name__ == "__main__":
     ollama_provider = OLLAMA(model="qwen:0.5b")
-    response = ollama_provider.chat("hi")
+    response = ollama_provider.chat("hi", stream=True)
     for r in response:
-        print(r, end="", flush=True)
+        print(r, end="", flush=True)
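Together these changes mean a stream now yields each chunk's own text (the old loop referenced an undefined `message_load`), and every request is prefixed with a system message. A sketch based on the diff's own `__main__` block; it assumes an Ollama server is running locally with the model already pulled:

```python
from webscout.Provider.OLLAMA import OLLAMA

# system_prompt is the parameter added in 5.1; 'qwen2:0.5b' is the
# default model named in the updated docstring.
bot = OLLAMA(model="qwen2:0.5b", system_prompt="You are a terse assistant.")

for chunk in bot.chat("hi", stream=True):
    print(chunk, end="", flush=True)
```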
{webscout-4.9.dist-info → webscout-5.1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version: 4.9
+Version: 5.1
 Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
 Author: OEvortex
 Author-email: helpingai5@gmail.com
@@ -66,6 +66,8 @@ Requires-Dist: requests-html
 Requires-Dist: bson
 Requires-Dist: cloudscraper
 Requires-Dist: emoji
+Requires-Dist: colorlog
+Requires-Dist: openai
 Provides-Extra: dev
 Requires-Dist: ruff >=0.1.6 ; extra == 'dev'
 Requires-Dist: pytest >=7.4.2 ; extra == 'dev'
@@ -1645,15 +1647,13 @@ gguf.convert(

 Webscout's `autollama` utility download model from huggingface and then automatically makes it ollama ready

-**Example:**
-
 ```python
 from webscout import autollama

-
-
-
-)
+model_path = "Vortex4ai/Jarvis-0.5B"
+gguf_file = "test2-q4_k_m.gguf"
+
+autollama.main(model_path, gguf_file)
 ```

 **Command Line Usage:**
{webscout-4.9.dist-info → webscout-5.1.dist-info}/RECORD
CHANGED
@@ -24,9 +24,9 @@ webscout/webscout_search_async.py,sha256=dooKGwLm0cwTml55Vy6NHPPY-nymEqX2h8laX94
 webscout/websx_search.py,sha256=5hfkkmGFhyQzojUpvMzIOJ3DBZIBNS90UReaacsfu6s,521
 webscout/Agents/Onlinesearcher.py,sha256=GzF2JcMfj07d74mxQEoaxwtxahgLHl3b_ugTbXjOwq4,7113
 webscout/Agents/__init__.py,sha256=VbGyW5pulh3LRqbVTv54n5TwWsrTqOANRioG18xtdJ0,58
-webscout/Agents/functioncall.py,sha256=
+webscout/Agents/functioncall.py,sha256=qH1Tofi4h5CK5RhXaLQhXu8swEUmcyK9R5xpS6jMLrs,5784
 webscout/Extra/__init__.py,sha256=GG1qUwS-HspT4TeeAIT4qFpM8PaO1ZdQhpelctaM7Rs,99
-webscout/Extra/autollama.py,sha256=
+webscout/Extra/autollama.py,sha256=qM8alxlWzg10BGIYKZBUtIEAXrkvEOWBwSxdPp3zq9I,6226
 webscout/Extra/gguf.py,sha256=RvSp7xuaD6epAA9iAzthUnAQ3HA5N-svMyKUadAVnw8,7009
 webscout/Extra/weather.py,sha256=wdSrQxZRpbNfyaux0BeLdaDWyde5KwxZjSUM13820X0,2460
 webscout/Extra/weather_ascii.py,sha256=Aed-_EUzvTEjBXbOpNRxkJBLa6fXsclknXP06HnQD18,808
@@ -47,14 +47,14 @@ webscout/Provider/Cohere.py,sha256=OZ7-0iaJ8L5e4Sy-L2UGm8SnBmS7CbaFIj6a08bABVw,8
 webscout/Provider/DARKAI.py,sha256=JpfFcPfd2kp15KSJ7GJ5Zy4zrwYQ_zHpqdFD2904Ii0,9065
 webscout/Provider/Deepinfra.py,sha256=tdST5aQjaCs9_B5mrnrXmihDei73MjB-F8cpES-noc4,18756
 webscout/Provider/Deepseek.py,sha256=jp8cZhbmscDjlXLCGI8MhDGORkbbxyeUlCqu5Z5GGSI,9210
-webscout/Provider/DiscordRocks.py,sha256=
+webscout/Provider/DiscordRocks.py,sha256=AgpAofgHY8MMKYhuqhtwLM8qGiYatStc2Aa1XX-3PPU,15028
 webscout/Provider/Farfalle.py,sha256=zl2AD5NomuHCkW21tDfI1Z-KIlhiuQ32eiNM-1B4KWQ,9010
 webscout/Provider/Gemini.py,sha256=V79nIi5vhPfvjlGYg5XuH6RfY7AyNnBqnJM-OBK99hE,8453
 webscout/Provider/Groq.py,sha256=h_dPKwqXRwmgvmEmkDYKdXwrlI4Zm2vZuCnSMItoa2w,28662
 webscout/Provider/Koboldai.py,sha256=KwWx2yPlvT9BGx37iNvSbgzWkJ9I8kSOmeg7sL1hb0M,15806
 webscout/Provider/Llama.py,sha256=pqjpB09MFufImzTav1PwTWsukSCr3QiB-yFGJIIBAu8,8293
-webscout/Provider/Llama3.py,sha256=
-webscout/Provider/OLLAMA.py,sha256=
+webscout/Provider/Llama3.py,sha256=UnSWyBMSkp4WAxU4zNI9VNsZY0aAOHvT7AK0xJlJW90,7559
+webscout/Provider/OLLAMA.py,sha256=g8ejBjEZN0zya-10-v_njADZ796Uxu4Nbj_gaNrlj5I,7374
 webscout/Provider/Openai.py,sha256=SjfVOwY94unVnXhvN0Fkome-q2-wi4mPJk_vCGq5Fjc,20617
 webscout/Provider/PI.py,sha256=IodVvGR_RIZpGJ0ypFF4U6NBMZAZ5O1BlRFMelot8so,8364
 webscout/Provider/Perplexity.py,sha256=gUnXyVNbl6tWAqirwHEoPkjCzxpORcKxL77aoFKepBk,21485
@@ -75,9 +75,9 @@ webscout/Provider/meta.py,sha256=3iBylmAk9d673Axvw6hFi0-0x_Fq7ZgtH_1j2_rcDwY,307
 webscout/Provider/turboseek.py,sha256=BNx_urbs6Ixr7SEOgL4Uo1iZdjYC7CxoefJcsN4LK6I,9138
 webscout/Provider/xdash.py,sha256=KUDTEX8I0z72bIDi-w5Se7xmB_lbmaX7KlCmIl2ad4c,7925
 webscout/Provider/yep.py,sha256=RbEBzHeEFxgfdnHXHuBny6NKHcYYYNA6bvTggvAzoLk,10399
-webscout-
-webscout-
-webscout-
-webscout-
-webscout-
-webscout-
+webscout-5.1.dist-info/LICENSE.md,sha256=9P0imsudI7MEvZe2pOcg8rKBn6E5FGHQ-riYozZI-Bk,2942
+webscout-5.1.dist-info/METADATA,sha256=5QksBKkY8zwt12vphkMh6GFj6SiwZrQXYdetRMd0HMY,50812
+webscout-5.1.dist-info/WHEEL,sha256=R0nc6qTxuoLk7ShA2_Y-UWkN8ZdfDBG2B6Eqpz2WXbs,91
+webscout-5.1.dist-info/entry_points.txt,sha256=Hh4YIIjvkqB9SVxZ2ri4DZUkgEu_WF_5_r_nZDIvfG8,73
+webscout-5.1.dist-info/top_level.txt,sha256=nYIw7OKBQDr_Z33IzZUKidRD3zQEo8jOJYkMVMeN334,9
+webscout-5.1.dist-info/RECORD,,
{webscout-4.9.dist-info → webscout-5.1.dist-info}/LICENSE.md
File without changes
{webscout-4.9.dist-info → webscout-5.1.dist-info}/WHEEL
File without changes
{webscout-4.9.dist-info → webscout-5.1.dist-info}/entry_points.txt
File without changes
{webscout-4.9.dist-info → webscout-5.1.dist-info}/top_level.txt
File without changes