webscout 4.9__py3-none-any.whl → 5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/Agents/functioncall.py +58 -102
- webscout/Provider/DiscordRocks.py +2 -1
- {webscout-4.9.dist-info → webscout-5.0.dist-info}/METADATA +1 -1
- {webscout-4.9.dist-info → webscout-5.0.dist-info}/RECORD +8 -8
- {webscout-4.9.dist-info → webscout-5.0.dist-info}/LICENSE.md +0 -0
- {webscout-4.9.dist-info → webscout-5.0.dist-info}/WHEEL +0 -0
- {webscout-4.9.dist-info → webscout-5.0.dist-info}/entry_points.txt +0 -0
- {webscout-4.9.dist-info → webscout-5.0.dist-info}/top_level.txt +0 -0
webscout/Agents/functioncall.py
CHANGED
@@ -1,72 +1,44 @@
 import json
 import logging
-from webscout import
+from webscout import LLAMA3, WEBS
 
 class FunctionCallingAgent:
-    def __init__(self, model: str = "
+    def __init__(self, model: str = "llama3-8b",
                  system_prompt: str = 'You are a helpful assistant that will always answer what the user wants',
                  tools: list = None):
-
-        Initialize the FunctionCallingAgent with the model, system prompt, and tools.
-
-        Args:
-            model (str): The model to use for deepinfra chat.
-            system_prompt (str): The system prompt to initialize the model.
-            tools (list): A list of tools the agent can use.
-        """
-        self.deepinfra = DeepInfra(model=model, system_prompt=system_prompt, timeout=300)
+        self.LLAMA3 = LLAMA3(model=model, system=system_prompt, timeout=300)
         self.tools = tools if tools is not None else []
-
+        self.webs = WEBS()
 
     def function_call_handler(self, message_text: str) -> dict:
-        """
-        Handles function calls based on the provided message text.
-
-        Args:
-            message_text (str): The input message text from the user.
-
-        Returns:
-            dict: The extracted function call and arguments.
-        """
         system_message = self._generate_system_message(message_text)
-        response = self.
+        response = self.LLAMA3.chat(system_message)
         # logging.info(f"Raw response: {response}")
-
         return self._parse_function_call(response)
 
     def _generate_system_message(self, user_message: str) -> str:
-        ""
-        Generates a system message incorporating the user message and available tools.
-
-        Args:
-            user_message (str): The input message from the user.
-
-        Returns:
-            str: The formatted system message.
-        """
-        tools_description = '\n'.join([f"{tool['function']['name']}: {tool['function'].get('description', '')}" for tool in self.tools])
+        tools_description = '\n'.join([f"- {tool['function']['name']}: {tool['function'].get('description', '')}" for tool in self.tools])
         return (
-
-            "
-            "
-
-            "
-            "
-            "
-            "
-
+            "You are an AI assistant capable of understanding user requests and using tools to fulfill them. "
+            "Always respond using the JSON format specified below, even if you're not sure about the answer. "
+            f"Available tools:\n{tools_description}\n\n"
+            "Instructions:\n"
+            "1. Analyze the user's request.\n"
+            "2. Choose the most appropriate tool based on the request.\n"
+            "3. Respond ONLY with a JSON object in this exact format:\n"
+            "{\n"
+            ' "tool_name": "name_of_the_tool",\n'
+            ' "tool_input": {\n'
+            ' "param1": "value1",\n'
+            ' "param2": "value2"\n'
+            " }\n"
+            "}\n\n"
+            "If you can't determine a suitable tool, use the 'general_ai' tool with the user's message as the 'question' parameter.\n\n"
+            f"User request: {user_message}\n\n"
+            "Your response (in JSON format):"
         )
 
     def _parse_function_call(self, response: str) -> dict:
-        """
-        Parses the response from the model to extract the function call.
-
-        Args:
-            response (str): The raw response from the model.
-
-        Returns:
-            dict: A dictionary containing the function name and arguments.
-        """
         try:
             # Find the JSON-like part of the response
             start_idx = response.find("{")
@@ -76,14 +48,7 @@ class FunctionCallingAgent:
                 raise ValueError("No valid JSON structure found in the response.")
 
             response_json_str = response[start_idx:end_idx]
-
-            # Replace single quotes with double quotes and remove extra braces
-            response_json_str = response_json_str.replace("'", '"')
-            response_json_str = response_json_str.replace("{{", "{").replace("}}", "}")
 
-            # Remove any leading or trailing whitespace
-            response_json_str = response_json_str.strip()
-
             # Attempt to load the JSON string
             return json.loads(response_json_str)
 
@@ -92,17 +57,8 @@ class FunctionCallingAgent:
             return {"error": str(e)}
 
     def execute_function(self, function_call_data: dict) -> str:
-        ""
-
-
-        Args:
-            function_call_data (dict): A dictionary containing the function name and arguments.
-
-        Returns:
-            str: The result of the function execution.
-        """
-        function_name = function_call_data.get("tool_name") # Use 'tool_name' instead of 'name'
-        arguments = function_call_data.get("tool_input", {}) # Use 'tool_input' instead of 'arguments'
+        function_name = function_call_data.get("tool_name")
+        arguments = function_call_data.get("tool_input", {})
 
         if not isinstance(arguments, dict):
             logging.error("Invalid arguments format.")
@@ -110,28 +66,32 @@ class FunctionCallingAgent:
 
         logging.info(f"Executing function: {function_name} with arguments: {arguments}")
 
-
-
-
-
+        # if function_name == "web_search":
+        #     return self._handle_web_search(arguments)
+        # elif function_name == "general_ai":
+        #     return self._handle_general_ai(arguments)
+        # else:
+        #     return f"Function '{function_name}' is not implemented."
 
     # def _handle_web_search(self, arguments: dict) -> str:
-    #     """
-    #     Handles web search queries using the WEBS tool.
-
-    #     Args:
-    #         arguments (dict): A dictionary containing the query argument.
-
-    #     Returns:
-    #         str: The result of the web search.
-    #     """
    #     query = arguments.get("query")
    #     if not query:
    #         return "Please provide a search query."
 
-    #     search_results = self.webs.text(query)
-    #
-    #
+    #     search_results = self.webs.text(query, max_results=3)
+    #     formatted_results = "\n\n".join(
+    #         f"{i+1}. {result['title']}\n{result['body']}\nURL: {result['href']}"
+    #         for i, result in enumerate(search_results)
+    #     )
+    #     return f"Here's what I found:\n\n{formatted_results}"
+
+    # def _handle_general_ai(self, arguments: dict) -> str:
+    #     question = arguments.get("question")
+    #     if not question:
+    #         return "Please provide a question for the AI to answer."
+
+    #     response = self.LLAMA3.chat(question)
+    #     return response
 
 # Example usage
 if __name__ == "__main__":
@@ -139,48 +99,44 @@ if __name__ == "__main__":
         {
             "type": "function",
             "function": {
-                "name": "
+                "name": "web_search",
+                "description": "Search query on Google",
                 "parameters": {
                     "type": "object",
-                    "title": "UserDetail",
                     "properties": {
-                        "
-                        "
-                        "
-                        },
-                        "age": {
-                            "title": "Age",
-                            "type": "integer"
+                        "query": {
+                            "type": "string",
+                            "description": "web search query"
                         }
                     },
-                    "required": ["
+                    "required": ["query"]
                 }
             }
         },
         {
             "type": "function",
             "function": {
-                "name": "
-                "description": "
+                "name": "general_ai",
+                "description": "Use AI to answer a general question",
                 "parameters": {
                     "type": "object",
                     "properties": {
-                        "
+                        "question": {
                             "type": "string",
-                            "description": "
+                            "description": "The question to be answered by the AI"
                         }
                     },
-                    "required": ["
+                    "required": ["question"]
                 }
             }
         }
     ]
 
     agent = FunctionCallingAgent(tools=tools)
-    message = "
+    message = "open yt"
    function_call_data = agent.function_call_handler(message)
    print(f"Function Call Data: {function_call_data}")
 
    if "error" not in function_call_data:
        result = agent.execute_function(function_call_data)
-        print(f"Function Execution Result: {result}")
+        print(f"Function Execution Result: {result}")
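For orientation, the sketch below shows how the refactored agent is driven end to end. It is pieced together from the __main__ block and system prompt in the diff above rather than from webscout's documentation: the tool schema and the {"tool_name": ..., "tool_input": ...} response contract come straight from the new code, while the sample request and the printed output are illustrative. It assumes webscout 5.0 is installed and the LLAMA3 backend is reachable.

# Minimal usage sketch based on the 5.0 code shown above (assumptions noted inline).
from webscout.Agents.functioncall import FunctionCallingAgent

# Tool schema copied from the example usage in the diff.
tools = [
    {
        "type": "function",
        "function": {
            "name": "web_search",
            "description": "Search query on Google",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {"type": "string", "description": "web search query"}
                },
                "required": ["query"],
            },
        },
    }
]

agent = FunctionCallingAgent(tools=tools)

# The system prompt instructs the model to reply with a JSON object of the form
# {"tool_name": "...", "tool_input": {...}}; _parse_function_call() extracts the
# first {...} block from the raw reply and json.loads() it.
call = agent.function_call_handler("find the latest Python release")
print(call)  # e.g. {'tool_name': 'web_search', 'tool_input': {'query': 'latest Python release'}}

# In 5.0 the dispatch branches inside execute_function are commented out, so the
# call below only logs the chosen tool and arguments and returns None.
if "error" not in call:
    agent.execute_function(call)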
webscout/Provider/DiscordRocks.py
CHANGED
@@ -122,7 +122,7 @@ class DiscordRocks(Provider):
 
     def __init__(
         self,
-        model: str = "
+        model: str = "llama-3.1-405b-turbo",
         max_tokens: int = 4096,
         temperature: float = 1,
         top_p: float = 1,
@@ -176,6 +176,7 @@ class DiscordRocks(Provider):
             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
             "content-type": "application/json",
             "dnt": "1",
+            "authorization": "Bearer missing api key",
             "origin": "https://llmplayground.net",
             "priority": "u=1, i",
             "referer": "https://llmplayground.net/",
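For the DiscordRocks change, a hedged sketch of what the new 5.0 defaults look like from the caller's side. The constructor parameters (model, max_tokens, temperature, top_p) and the "llama-3.1-405b-turbo" default come from the hunk above; the chat() call is an assumption based on the interface other webscout providers expose and may not match the actual method name. The added header means every request now carries a placeholder bearer token.

# Sketch only; constructor arguments taken from the diff, method name assumed.
from webscout.Provider.DiscordRocks import DiscordRocks

ai = DiscordRocks(
    model="llama-3.1-405b-turbo",  # new default model in 5.0
    max_tokens=4096,
    temperature=1,
    top_p=1,
)

# Assumed conversational entry point (other webscout providers expose chat());
# requests now include the "authorization: Bearer missing api key" header added
# in this release.
print(ai.chat("Hello"))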
{webscout-4.9.dist-info → webscout-5.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: webscout
-Version:
+Version: 5.0
 Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
 Author: OEvortex
 Author-email: helpingai5@gmail.com
{webscout-4.9.dist-info → webscout-5.0.dist-info}/RECORD
CHANGED
@@ -24,7 +24,7 @@ webscout/webscout_search_async.py,sha256=dooKGwLm0cwTml55Vy6NHPPY-nymEqX2h8laX94
 webscout/websx_search.py,sha256=5hfkkmGFhyQzojUpvMzIOJ3DBZIBNS90UReaacsfu6s,521
 webscout/Agents/Onlinesearcher.py,sha256=GzF2JcMfj07d74mxQEoaxwtxahgLHl3b_ugTbXjOwq4,7113
 webscout/Agents/__init__.py,sha256=VbGyW5pulh3LRqbVTv54n5TwWsrTqOANRioG18xtdJ0,58
-webscout/Agents/functioncall.py,sha256=
+webscout/Agents/functioncall.py,sha256=qH1Tofi4h5CK5RhXaLQhXu8swEUmcyK9R5xpS6jMLrs,5784
 webscout/Extra/__init__.py,sha256=GG1qUwS-HspT4TeeAIT4qFpM8PaO1ZdQhpelctaM7Rs,99
 webscout/Extra/autollama.py,sha256=8lyodIWAgJABzlMMHytlolPCgvUKh8ynkZD6MMEltXs,5970
 webscout/Extra/gguf.py,sha256=RvSp7xuaD6epAA9iAzthUnAQ3HA5N-svMyKUadAVnw8,7009
@@ -47,7 +47,7 @@ webscout/Provider/Cohere.py,sha256=OZ7-0iaJ8L5e4Sy-L2UGm8SnBmS7CbaFIj6a08bABVw,8
 webscout/Provider/DARKAI.py,sha256=JpfFcPfd2kp15KSJ7GJ5Zy4zrwYQ_zHpqdFD2904Ii0,9065
 webscout/Provider/Deepinfra.py,sha256=tdST5aQjaCs9_B5mrnrXmihDei73MjB-F8cpES-noc4,18756
 webscout/Provider/Deepseek.py,sha256=jp8cZhbmscDjlXLCGI8MhDGORkbbxyeUlCqu5Z5GGSI,9210
-webscout/Provider/DiscordRocks.py,sha256=
+webscout/Provider/DiscordRocks.py,sha256=AgpAofgHY8MMKYhuqhtwLM8qGiYatStc2Aa1XX-3PPU,15028
 webscout/Provider/Farfalle.py,sha256=zl2AD5NomuHCkW21tDfI1Z-KIlhiuQ32eiNM-1B4KWQ,9010
 webscout/Provider/Gemini.py,sha256=V79nIi5vhPfvjlGYg5XuH6RfY7AyNnBqnJM-OBK99hE,8453
 webscout/Provider/Groq.py,sha256=h_dPKwqXRwmgvmEmkDYKdXwrlI4Zm2vZuCnSMItoa2w,28662
@@ -75,9 +75,9 @@ webscout/Provider/meta.py,sha256=3iBylmAk9d673Axvw6hFi0-0x_Fq7ZgtH_1j2_rcDwY,307
 webscout/Provider/turboseek.py,sha256=BNx_urbs6Ixr7SEOgL4Uo1iZdjYC7CxoefJcsN4LK6I,9138
 webscout/Provider/xdash.py,sha256=KUDTEX8I0z72bIDi-w5Se7xmB_lbmaX7KlCmIl2ad4c,7925
 webscout/Provider/yep.py,sha256=RbEBzHeEFxgfdnHXHuBny6NKHcYYYNA6bvTggvAzoLk,10399
-webscout-
-webscout-
-webscout-
-webscout-
-webscout-
-webscout-
+webscout-5.0.dist-info/LICENSE.md,sha256=9P0imsudI7MEvZe2pOcg8rKBn6E5FGHQ-riYozZI-Bk,2942
+webscout-5.0.dist-info/METADATA,sha256=anlsD-HmXJT4_UV8LyrT5mxdnEKznprEDn2oPcf-Ucg,50819
+webscout-5.0.dist-info/WHEEL,sha256=R0nc6qTxuoLk7ShA2_Y-UWkN8ZdfDBG2B6Eqpz2WXbs,91
+webscout-5.0.dist-info/entry_points.txt,sha256=Hh4YIIjvkqB9SVxZ2ri4DZUkgEu_WF_5_r_nZDIvfG8,73
+webscout-5.0.dist-info/top_level.txt,sha256=nYIw7OKBQDr_Z33IzZUKidRD3zQEo8jOJYkMVMeN334,9
+webscout-5.0.dist-info/RECORD,,
{webscout-4.9.dist-info → webscout-5.0.dist-info}/LICENSE.md
File without changes
{webscout-4.9.dist-info → webscout-5.0.dist-info}/WHEEL
File without changes
{webscout-4.9.dist-info → webscout-5.0.dist-info}/entry_points.txt
File without changes
{webscout-4.9.dist-info → webscout-5.0.dist-info}/top_level.txt
File without changes