SimplerLLM 0.2.6__tar.gz → 0.2.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68) hide show
  1. {simplerllm-0.2.6 → simplerllm-0.2.8}/PKG-INFO +3 -1
  2. {simplerllm-0.2.6 → simplerllm-0.2.8}/SimplerLLM/agents/agent.py +2 -3
  3. simplerllm-0.2.8/SimplerLLM/agents/core_react_agent.py +208 -0
  4. simplerllm-0.2.8/SimplerLLM/agents/core_reflection_agent.py +174 -0
  5. simplerllm-0.2.8/SimplerLLM/agents/core_tool_calling_agent.py +159 -0
  6. simplerllm-0.2.8/SimplerLLM/agents/pandas_agent_exp.py +192 -0
  7. simplerllm-0.2.8/SimplerLLM/agents/sql_agent_exp.py +160 -0
  8. simplerllm-0.2.8/SimplerLLM/image/__init__.py +0 -0
  9. simplerllm-0.2.8/SimplerLLM/language/__init__.py +0 -0
  10. {simplerllm-0.2.6 → simplerllm-0.2.8}/SimplerLLM/language/embeddings.py +2 -2
  11. {simplerllm-0.2.6 → simplerllm-0.2.8}/SimplerLLM/language/llm.py +238 -11
  12. simplerllm-0.2.8/SimplerLLM/language/llm_providers/__init__.py +0 -0
  13. {simplerllm-0.2.6 → simplerllm-0.2.8}/SimplerLLM/language/llm_providers/anthropic_llm.py +4 -3
  14. {simplerllm-0.2.6 → simplerllm-0.2.8}/SimplerLLM/language/llm_providers/gemini_llm.py +4 -4
  15. simplerllm-0.2.8/SimplerLLM/language/llm_providers/lwh_llm.py +160 -0
  16. simplerllm-0.2.8/SimplerLLM/language/llm_providers/ollama_llm.py +128 -0
  17. {simplerllm-0.2.6 → simplerllm-0.2.8}/SimplerLLM/language/llm_providers/openai_llm.py +18 -13
  18. simplerllm-0.2.8/SimplerLLM/prompts/__init__.py +0 -0
  19. {simplerllm-0.2.6 → simplerllm-0.2.8}/SimplerLLM/prompts/messages_template.py +18 -1
  20. simplerllm-0.2.8/SimplerLLM/tools/__init__.py +0 -0
  21. {simplerllm-0.2.6 → simplerllm-0.2.8}/SimplerLLM/tools/generic_loader.py +3 -18
  22. simplerllm-0.2.8/SimplerLLM/tools/pandas_func.py +32 -0
  23. simplerllm-0.2.8/SimplerLLM/tools/predefined_tools.py +35 -0
  24. simplerllm-0.2.8/SimplerLLM/tools/python_func.py +31 -0
  25. {simplerllm-0.2.6 → simplerllm-0.2.8}/SimplerLLM.egg-info/PKG-INFO +3 -1
  26. {simplerllm-0.2.6 → simplerllm-0.2.8}/SimplerLLM.egg-info/SOURCES.txt +30 -1
  27. {simplerllm-0.2.6 → simplerllm-0.2.8}/SimplerLLM.egg-info/requires.txt +2 -0
  28. {simplerllm-0.2.6 → simplerllm-0.2.8}/setup.py +1 -1
  29. simplerllm-0.2.8/tests/test_chunker.py +39 -0
  30. simplerllm-0.2.8/tests/test_data_agent.py +77 -0
  31. simplerllm-0.2.8/tests/test_embed.py +14 -0
  32. simplerllm-0.2.8/tests/test_generate.py +103 -0
  33. simplerllm-0.2.8/tests/test_lwh.py +28 -0
  34. simplerllm-0.2.8/tests/test_ollama.py +11 -0
  35. simplerllm-0.2.8/tests/test_pydantic.py +83 -0
  36. simplerllm-0.2.8/tests/test_reAct_agent.py +26 -0
  37. simplerllm-0.2.8/tests/test_react_core_agent.py +43 -0
  38. simplerllm-0.2.8/tests/test_reflection_core_agent.py +26 -0
  39. simplerllm-0.2.8/tests/test_search_query.py +38 -0
  40. simplerllm-0.2.8/tests/test_sql_agent.py +133 -0
  41. simplerllm-0.2.8/tests/test_tool.py +88 -0
  42. simplerllm-0.2.8/tests/test_tool_agent.py +36 -0
  43. simplerllm-0.2.8/tests/test_youtube_tool.py +320 -0
  44. simplerllm-0.2.6/SimplerLLM/tools/predefined_tools.py +0 -6
  45. {simplerllm-0.2.6 → simplerllm-0.2.8}/LICENSE +0 -0
  46. {simplerllm-0.2.6 → simplerllm-0.2.8}/SimplerLLM/__init__.py +0 -0
  47. {simplerllm-0.2.6 → simplerllm-0.2.8}/SimplerLLM/agents/__init__.py +0 -0
  48. /simplerllm-0.2.6/SimplerLLM/image/__init__.py → /simplerllm-0.2.8/SimplerLLM/agents/core_agent_in_team.py +0 -0
  49. /simplerllm-0.2.6/SimplerLLM/language/__init__.py → /simplerllm-0.2.8/SimplerLLM/agents/core_human_in_loop_agent.py +0 -0
  50. /simplerllm-0.2.6/SimplerLLM/language/llm_providers/__init__.py → /simplerllm-0.2.8/SimplerLLM/agents/core_planning_agent.py +0 -0
  51. /simplerllm-0.2.6/SimplerLLM/prompts/__init__.py → /simplerllm-0.2.8/SimplerLLM/agents/core_rag_agent.py +0 -0
  52. /simplerllm-0.2.6/SimplerLLM/tools/__init__.py → /simplerllm-0.2.8/SimplerLLM/agents/memory_agent.py +0 -0
  53. {simplerllm-0.2.6 → simplerllm-0.2.8}/SimplerLLM/image/img_helper_funcs.py +0 -0
  54. {simplerllm-0.2.6 → simplerllm-0.2.8}/SimplerLLM/image/stability_ai.py +0 -0
  55. {simplerllm-0.2.6 → simplerllm-0.2.8}/SimplerLLM/language/llm_addons.py +0 -0
  56. {simplerllm-0.2.6 → simplerllm-0.2.8}/SimplerLLM/language/llm_providers/llm_response_models.py +0 -0
  57. {simplerllm-0.2.6 → simplerllm-0.2.8}/SimplerLLM/language/llm_providers/transformers_llm.py +0 -0
  58. {simplerllm-0.2.6 → simplerllm-0.2.8}/SimplerLLM/prompts/prompt_builder.py +0 -0
  59. {simplerllm-0.2.6 → simplerllm-0.2.8}/SimplerLLM/tools/file_functions.py +0 -0
  60. {simplerllm-0.2.6 → simplerllm-0.2.8}/SimplerLLM/tools/file_loader.py +0 -0
  61. {simplerllm-0.2.6 → simplerllm-0.2.8}/SimplerLLM/tools/json_helpers.py +0 -0
  62. {simplerllm-0.2.6 → simplerllm-0.2.8}/SimplerLLM/tools/rapid_api.py +0 -0
  63. {simplerllm-0.2.6 → simplerllm-0.2.8}/SimplerLLM/tools/serp.py +0 -0
  64. {simplerllm-0.2.6 → simplerllm-0.2.8}/SimplerLLM/tools/text_chunker.py +0 -0
  65. {simplerllm-0.2.6 → simplerllm-0.2.8}/SimplerLLM/tools/web_crawler.py +0 -0
  66. {simplerllm-0.2.6 → simplerllm-0.2.8}/SimplerLLM.egg-info/dependency_links.txt +0 -0
  67. {simplerllm-0.2.6 → simplerllm-0.2.8}/SimplerLLM.egg-info/top_level.txt +0 -0
  68. {simplerllm-0.2.6 → simplerllm-0.2.8}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: SimplerLLM
3
- Version: 0.2.6
3
+ Version: 0.2.8
4
4
  Summary: An easy-to-use Library for interacting with language models.
5
5
  Home-page: https://github.com/hassancs91/SimplerLLM
6
6
  Author: Hasan Aboul Hasan
@@ -35,6 +35,8 @@ Requires-Dist: python_docx==1.1.0
35
35
  Requires-Dist: pytube==15.0.0
36
36
  Requires-Dist: Requests==2.31.0
37
37
  Requires-Dist: youtube_transcript_api==0.6.2
38
+ Requires-Dist: pandas==2.2.2
39
+ Requires-Dist: colorama==0.4.6
38
40
 
39
41
  # ⚪ SimplerLLM (Beta)
40
42
 
@@ -34,13 +34,12 @@ class Agent:
34
34
 
35
35
  Action_Response: the result of the action.
36
36
 
37
- Final Answer:\n
38
-
37
+
39
38
 
40
39
  """.strip()
41
40
 
42
41
 
43
-
42
+ # def output_final_answer()
44
43
 
45
44
 
46
45
  def add_tool(self, tool_function):
@@ -0,0 +1,208 @@
1
+ from pydantic import BaseModel
2
+ from SimplerLLM.language.llm import LLM
3
+ from SimplerLLM.prompts.messages_template import MessagesTemplate
4
+ from SimplerLLM.prompts.hub.agentic_prompts import react_core_agent_system_prompt
5
+ from SimplerLLM.utils.custom_verbose import verbose_print
6
+ from SimplerLLM.tools.json_helpers import extract_json_from_text
7
+ from SimplerLLM.tools.predefined_tools import PREDEFINED_TOOLS
8
+ import json
9
+
10
+ class ReActAgentResponse(BaseModel):
11
+ """
12
+ A model for storing the response from the ReActAgent.
13
+ Attributes:
14
+ user_query (str): The original query from the user.
15
+ agent_output (str): The output generated by the tool or directly from the language model.
16
+ """
17
+ user_query: str
18
+ agent_output: str
19
+
20
+ class ReActAgent:
21
+ """
22
+ A tool managing agent that interfaces with a language model to process and respond to user queries.
23
+
24
+ Attributes:
25
+ verbose (bool): If set to True, enables detailed logging for debugging purposes.
26
+ llm_instance (LLM): An instance of a language model from the SimplerLLM library.
27
+ available_actions (dict): A dictionary to store available tool functions mapped to their descriptions.
28
+ system_prompt_template (str): A template used to construct the system prompt for the language model.
29
+ """
30
+
31
+ def __init__(self, llm_instance: LLM, agent_system_prompt=react_core_agent_system_prompt, verbose: bool=False):
32
+ """
33
+ Initializes the ReActAgent with necessary parameters.
34
+
35
+ Parameters:
36
+ llm_instance (LLM): An instance of the language model to be used.
37
+ agent_system_prompt (str): The system prompt template for generating responses.
38
+ verbose (bool): Flag to turn on verbose logging for debugging.
39
+ """
40
+ self.verbose = verbose
41
+ self.llm_instance = llm_instance
42
+ self.available_actions = {}
43
+ self.system_prompt_template = agent_system_prompt
44
+ if self.verbose:
45
+ verbose_print("Initialized ReActAgent with verbose mode.", "info")
46
+
47
+ def add_tool(self, tool_function):
48
+ """
49
+ Adds a tool to the agent's available actions.
50
+
51
+ Parameters:
52
+ tool_function (callable): The function representing the tool to be added.
53
+
54
+ Raises:
55
+ ValueError: If the tool function is not predefined or properly decorated as a custom tool.
56
+ """
57
+ try:
58
+ if tool_function in PREDEFINED_TOOLS.values():
59
+ tool_name = next(name for name, func in PREDEFINED_TOOLS.items() if func == tool_function)
60
+ description = tool_function.__doc__.strip()
61
+ elif hasattr(tool_function, 'is_custom_tool') and tool_function.is_custom_tool:
62
+ tool_name = tool_function.__name__
63
+ description = tool_function.description
64
+ else:
65
+ raise ValueError("Tool function must be predefined or decorated as a custom tool.")
66
+
67
+ self.available_actions[tool_name] = {
68
+ "function": tool_function,
69
+ "description": description
70
+ }
71
+ if self.verbose:
72
+ verbose_print(f"Added tool: {tool_name} with description: {description}", "info")
73
+ except Exception as e:
74
+ if self.verbose:
75
+ verbose_print(f"Error in add_tool: {str(e)}", "error")
76
+ raise
77
+
78
+ def construct_system_prompt(self):
79
+ """
80
+ Constructs the system prompt from the available actions to guide the language model.
81
+
82
+ Returns:
83
+ str: The formatted system prompt incorporating descriptions of all available actions.
84
+ """
85
+ actions_description = "\n".join(
86
+ [f"{name}:\n {details['description']}"
87
+ for name, details in self.available_actions.items()]
88
+ )
89
+ return self.system_prompt_template.format(actions_list=actions_description)
90
+
91
+ def generate_response(self, user_query: str = None, messages: MessagesTemplate = None, max_turns = 5 ):
92
+ """
93
+ Generates a response to a user query using the language model and available tools.
94
+
95
+ Parameters:
96
+ user_query (str): The user's query to process.
97
+ messages (MessagesTemplate): Optional message history to continue from; mutually exclusive with user_query.
98
+ max_turns (int): Maximum number of reasoning turns before returning the latest response.
99
+
100
+ Returns:
101
+ ReActAgentResponse: The response from the agent, either as direct LLM output or processed by a tool.
102
+ """
103
+ agent_system_prompt = self.construct_system_prompt()
104
+
105
+ if user_query and messages:
106
+ raise ValueError("Only one of 'user_query' or 'messages' should be provided.")
107
+
108
+
109
+ agent_messages = MessagesTemplate()
110
+
111
+
112
+ turn_count = 1
113
+ user_request = ""
114
+
115
+ if user_query:
116
+ agent_messages.add_user_message(user_query)
117
+ user_request = user_query
118
+ if messages:
119
+ agent_messages.prepend_messages(messages.get_messages())
120
+ user_request = agent_messages.get_messages()[-1]['content']
121
+
122
+
123
+ tool_agent_response = ReActAgentResponse(
124
+ user_query=user_request,
125
+ agent_output=""
126
+ )
127
+
128
+
129
+ while turn_count <= max_turns:
130
+ if self.verbose:
131
+ verbose_print(f"Turn: {turn_count}")
132
+ verbose_print("----------------------")
133
+
134
+ turn_count += 1
135
+ #used to extract last user query in case messages were provided instead of single prompt
136
+ history = agent_messages.get_messages()
137
+ if user_query:
138
+
139
+ agent_response = self.llm_instance.generate_response(messages=history, system_prompt=agent_system_prompt)
140
+ if messages:
141
+
142
+ agent_response = self.llm_instance.generate_response(messages=history, system_prompt=agent_system_prompt)
143
+
144
+
145
+ agent_messages.add_assistant_message(agent_response)
146
+
147
+ final_response = agent_response
148
+
149
+ if self.verbose:
150
+ verbose_print(f"LLM First Response: {agent_response}", "info")
151
+
152
+ # Attempt to extract a JSON action from the LLM response
153
+ try:
154
+ action_json = extract_json_from_text(agent_response)
155
+ except json.JSONDecodeError:
156
+ if self.verbose:
157
+ verbose_print("Failed to decode JSON from response.", "error")
158
+ action_json = None
159
+
160
+ # Handle response based on the presence of an actionable JSON
161
+ if action_json:
162
+ if 'function_name' in action_json[0]:
163
+ function_name = action_json[0].get('function_name')
164
+ function_params = action_json[0].get('function_params', {})
165
+ if function_name not in self.available_actions:
166
+ raise Exception(f"Unknown action: {function_name}")
167
+
168
+
169
+
170
+
171
+ action_function = self.available_actions[function_name]["function"]
172
+ result = action_function(**function_params)
173
+
174
+ if self.verbose:
175
+ verbose_print(f"Executed {function_name} with parameters {function_params}.", "info")
176
+
177
+ observation = f"Observation: {result}"
178
+
179
+
180
+ agent_messages.add_user_message(observation)
181
+
182
+ if self.verbose:
183
+ verbose_print("----------------------")
184
+
185
+
186
+
187
+ else:
188
+ break
189
+ else:
190
+ if self.verbose:
191
+ verbose_print("No action JSON found; returning LLM response directly.", "info")
192
+ break
193
+
194
+ #Check if the final_response is the final
195
+
196
+
197
+ try:
198
+ is_final_json = extract_json_from_text(agent_response)
199
+ if is_final_json:
200
+ if 'final_answer' in is_final_json[0]:
201
+ tool_agent_response.agent_output = is_final_json[0]['final_answer']
202
+ return tool_agent_response
203
+
204
+ except json.JSONDecodeError:
205
+ pass
206
+
207
+ tool_agent_response.agent_output = final_response
208
+ return tool_agent_response
@@ -0,0 +1,174 @@
1
+ from pydantic import BaseModel
2
+ from SimplerLLM.language.llm import LLM
3
+ from SimplerLLM.prompts.messages_template import MessagesTemplate
4
+ from SimplerLLM.prompts.hub.agentic_prompts import reflection_core_agent_system_prompt
5
+ from SimplerLLM.utils.custom_verbose import verbose_print
6
+ from SimplerLLM.tools.json_helpers import extract_json_from_text
7
+ from SimplerLLM.tools.predefined_tools import PREDEFINED_TOOLS
8
+ import json
9
+
10
+ class ReflectionAgentResponse(BaseModel):
11
+ """
12
+ A model for storing the response from the ReflectionAgent.
13
+ Attributes:
14
+ user_query (str): The original query from the user.
15
+ agent_output (str): The output generated by the tool or directly from the language model.
16
+ """
17
+ user_query: str
18
+ agent_output: str
19
+
20
+ class ReflectionAgent:
21
+ """
22
+ A tool managing agent that interfaces with a language model to process and respond to user queries.
23
+
24
+ Attributes:
25
+ verbose (bool): If set to True, enables detailed logging for debugging purposes.
26
+ llm_instance (LLM): An instance of a language model from the SimplerLLM library.
27
+ available_actions (dict): A dictionary to store available tool functions mapped to their descriptions.
28
+ system_prompt_template (str): A template used to construct the system prompt for the language model.
29
+ """
30
+
31
+ def __init__(self, llm_instance: LLM, agent_system_prompt=reflection_core_agent_system_prompt, verbose: bool=False):
32
+ """
33
+ Initializes the ReflectionAgent with necessary parameters.
34
+
35
+ Parameters:
36
+ llm_instance (LLM): An instance of the language model to be used.
37
+ agent_system_prompt (str): The system prompt template for generating responses.
38
+ verbose (bool): Flag to turn on verbose logging for debugging.
39
+ """
40
+ self.verbose = verbose
41
+ self.llm_instance = llm_instance
42
+ self.available_actions = {}
43
+ self.system_prompt_template = agent_system_prompt
44
+ if self.verbose:
45
+ verbose_print("Initialized RefletctionAgent with verbose mode.", "info")
46
+
47
+ def add_tool(self, tool_function):
48
+ """
49
+ Adds a tool to the agent's available actions.
50
+
51
+ Parameters:
52
+ tool_function (callable): The function representing the tool to be added.
53
+
54
+ Raises:
55
+ ValueError: If the tool function is not predefined or properly decorated as a custom tool.
56
+ """
57
+ try:
58
+ if tool_function in PREDEFINED_TOOLS.values():
59
+ tool_name = next(name for name, func in PREDEFINED_TOOLS.items() if func == tool_function)
60
+ description = tool_function.__doc__.strip()
61
+ elif hasattr(tool_function, 'is_custom_tool') and tool_function.is_custom_tool:
62
+ tool_name = tool_function.__name__
63
+ description = tool_function.description
64
+ else:
65
+ raise ValueError("Tool function must be predefined or decorated as a custom tool.")
66
+
67
+ self.available_actions[tool_name] = {
68
+ "function": tool_function,
69
+ "description": description
70
+ }
71
+ if self.verbose:
72
+ verbose_print(f"Added tool: {tool_name} with description: {description}", "info")
73
+ except Exception as e:
74
+ if self.verbose:
75
+ verbose_print(f"Error in add_tool: {str(e)}", "error")
76
+ raise
77
+
78
+ def construct_system_prompt(self):
79
+ """
80
+ Constructs the system prompt from the available actions to guide the language model.
81
+
82
+ Returns:
83
+ str: The formatted system prompt incorporating descriptions of all available actions.
84
+ """
85
+ actions_description = "\n".join(
86
+ [f"{name}:\n {details['description']}"
87
+ for name, details in self.available_actions.items()]
88
+ )
89
+ return self.system_prompt_template.format(actions_list=actions_description)
90
+
91
+
92
+
93
+ def generate_response(self, user_query: str = None, messages: MessagesTemplate = None, max_turns = 5 ):
94
+ """
95
+ Generates a response to a user query using the language model and available tools.
96
+
97
+ Parameters:
98
+ user_query (str): The user's query to process.
99
+ messages (MessagesTemplate): Optional message history to continue from; mutually exclusive with user_query.
100
+
101
+ Returns:
102
+ ReflectionAgentResponse: The response from the agent, either as direct LLM output or processed by a tool.
103
+ """
104
+ agent_system_prompt = self.construct_system_prompt()
105
+
106
+ if user_query and messages:
107
+ raise ValueError("Only one of 'user_query' or 'messages' should be provided.")
108
+
109
+
110
+ agent_messages = MessagesTemplate()
111
+
112
+
113
+ turn_count = 1
114
+ user_request = ""
115
+
116
+ if user_query:
117
+ agent_messages.add_user_message(user_query)
118
+ user_request = user_query
119
+ if messages:
120
+ agent_messages.prepend_messages(messages.get_messages())
121
+ user_request = agent_messages.get_messages()[-1]['content']
122
+
123
+
124
+ tool_agent_response = ReflectionAgentResponse(
125
+ user_query=user_request,
126
+ agent_output=""
127
+ )
128
+
129
+
130
+ while turn_count <= max_turns:
131
+
132
+ if self.verbose:
133
+ verbose_print(f"Turn: {turn_count}")
134
+ verbose_print("----------------------")
135
+
136
+
137
+ if turn_count > 1 :
138
+ agent_messages.add_user_message("Double check your last asnwer, and check for any improvments")
139
+
140
+
141
+ turn_count += 1
142
+
143
+
144
+ #used to extract last user query in case messages were provided instead of single prompt
145
+ history = agent_messages.get_messages()
146
+
147
+ if user_query:
148
+ agent_response = self.llm_instance.generate_response(messages=history, system_prompt=agent_system_prompt,max_tokens=4096)
149
+ if messages:
150
+ agent_response = self.llm_instance.generate_response(messages=history, system_prompt=agent_system_prompt,max_tokens=4096)
151
+
152
+
153
+ agent_messages.add_assistant_message(agent_response)
154
+
155
+ final_response = agent_response
156
+
157
+ if self.verbose:
158
+ verbose_print(f"LLM First Response: {agent_response}", "info")
159
+
160
+
161
+
162
+
163
+ try:
164
+ is_final_json = extract_json_from_text(agent_response)
165
+ if is_final_json:
166
+ if 'final_answer' in is_final_json[0]:
167
+ tool_agent_response.agent_output = is_final_json[0]['final_answer']
168
+ return tool_agent_response
169
+
170
+ except json.JSONDecodeError:
171
+ pass
172
+
173
+ tool_agent_response.agent_output = final_response
174
+ return tool_agent_response
@@ -0,0 +1,159 @@
1
+ from pydantic import BaseModel
2
+ from SimplerLLM.language.llm import LLM
3
+ from SimplerLLM.prompts.messages_template import MessagesTemplate
4
+ from SimplerLLM.prompts.hub.agentic_prompts import tool_calling_agent_system_prompt
5
+ from SimplerLLM.utils.custom_verbose import verbose_print
6
+ from SimplerLLM.tools.json_helpers import extract_json_from_text
7
+ from SimplerLLM.tools.predefined_tools import PREDEFINED_TOOLS
8
+ import json
9
+
10
+ class ToolCallingAgentResponse(BaseModel):
11
+ """
12
+ A model for storing the response from the ToolAgent.
13
+ Attributes:
14
+ user_query (str): The original query from the user.
15
+ agent_output (str): The output generated by the tool or directly from the language model.
16
+ """
17
+ user_query: str
18
+ agent_output: str
19
+
20
+ class ToolCallingAgent:
21
+ """
22
+ A tool managing agent that interfaces with a language model to process and respond to user queries.
23
+
24
+ Attributes:
25
+ verbose (bool): If set to True, enables detailed logging for debugging purposes.
26
+ llm_instance (LLM): An instance of a language model from the SimplerLLM library.
27
+ available_actions (dict): A dictionary to store available tool functions mapped to their descriptions.
28
+ system_prompt_template (str): A template used to construct the system prompt for the language model.
29
+ """
30
+
31
+ def __init__(self, llm_instance: LLM, agent_system_prompt=tool_calling_agent_system_prompt, verbose: bool=False):
32
+ """
33
+ Initializes the ToolAgent with necessary parameters.
34
+
35
+ Parameters:
36
+ llm_instance (LLM): An instance of the language model to be used.
37
+ agent_system_prompt (str): The system prompt template for generating responses.
38
+ verbose (bool): Flag to turn on verbose logging for debugging.
39
+ """
40
+ self.verbose = verbose
41
+ self.llm_instance = llm_instance
42
+ self.available_actions = {}
43
+ self.system_prompt_template = agent_system_prompt
44
+ if self.verbose:
45
+ verbose_print("Initialized ToolAgent with verbose mode.", "info")
46
+
47
+ def add_tool(self, tool_function):
48
+ """
49
+ Adds a tool to the agent's available actions.
50
+
51
+ Parameters:
52
+ tool_function (callable): The function representing the tool to be added.
53
+
54
+ Raises:
55
+ ValueError: If the tool function is not predefined or properly decorated as a custom tool.
56
+ """
57
+ try:
58
+ if tool_function in PREDEFINED_TOOLS.values():
59
+ tool_name = next(name for name, func in PREDEFINED_TOOLS.items() if func == tool_function)
60
+ description = tool_function.__doc__.strip()
61
+ elif hasattr(tool_function, 'is_custom_tool') and tool_function.is_custom_tool:
62
+ tool_name = tool_function.__name__
63
+ description = tool_function.description
64
+ else:
65
+ raise ValueError("Tool function must be predefined or decorated as a custom tool.")
66
+
67
+ self.available_actions[tool_name] = {
68
+ "function": tool_function,
69
+ "description": description
70
+ }
71
+ if self.verbose:
72
+ verbose_print(f"Added tool: {tool_name} with description: {description}", "info")
73
+ except Exception as e:
74
+ if self.verbose:
75
+ verbose_print(f"Error in add_tool: {str(e)}", "error")
76
+ raise
77
+
78
+ def construct_system_prompt(self):
79
+ """
80
+ Constructs the system prompt from the available actions to guide the language model.
81
+
82
+ Returns:
83
+ str: The formatted system prompt incorporating descriptions of all available actions.
84
+ """
85
+ actions_description = "\n".join(
86
+ [f"{name}:\n {details['description']}"
87
+ for name, details in self.available_actions.items()]
88
+ )
89
+ return self.system_prompt_template.format(actions_list=actions_description)
90
+
91
+
92
+
93
+ def generate_response(self, user_query: str = None, messages: MessagesTemplate = None, execute_tool: bool =True, ):
94
+ """
95
+ Generates a response to a user query using the language model and available tools.
96
+
97
+ Parameters:
98
+ user_query (str): The user's query to process.
99
+ execute_tool (bool): Whether to execute the tool function if applicable.
100
+ messages (MessagesTemplate): Optional message history to continue from; mutually exclusive with user_query.
101
+
102
+ Returns:
103
+ ToolCallingAgentResponse: The response from the agent, either as direct LLM output or processed by a tool.
104
+ """
105
+ agent_system_prompt = self.construct_system_prompt()
106
+
107
+
108
+ if user_query and messages:
109
+ raise ValueError("Only one of 'user_query' or 'messages' should be provided.")
110
+
111
+ #used to extract last user query in case messages were provided instead of single prompt
112
+ user_request = ""
113
+
114
+ if user_query:
115
+ agent_response = self.llm_instance.generate_response(prompt=user_query, system_prompt=agent_system_prompt)
116
+ user_request = user_query
117
+ if messages:
118
+ history = messages.get_messages()
119
+ agent_response = self.llm_instance.generate_response(messages=history, system_prompt=agent_system_prompt)
120
+ user_request = history[-1]['content']
121
+
122
+
123
+
124
+
125
+ if self.verbose:
126
+ verbose_print(f"LLM Response: {agent_response}", "info")
127
+
128
+ # Attempt to extract a JSON action from the LLM response
129
+ try:
130
+ action_json = extract_json_from_text(agent_response)
131
+ except json.JSONDecodeError:
132
+ if self.verbose:
133
+ verbose_print("Failed to decode JSON from response.", "error")
134
+ action_json = None
135
+
136
+ # Handle response based on the presence of an actionable JSON
137
+ if action_json:
138
+ function_name = action_json[0].get('function_name')
139
+ function_params = action_json[0].get('function_params', {})
140
+ if function_name not in self.available_actions:
141
+ raise Exception(f"Unknown action: {function_name}")
142
+
143
+ tool_agent_response = ToolCallingAgentResponse(
144
+ user_query=user_request,
145
+ agent_output=str(action_json[0])
146
+ )
147
+
148
+ if execute_tool:
149
+ action_function = self.available_actions[function_name]["function"]
150
+ tool_agent_response.agent_output = action_function(**function_params)
151
+
152
+ if self.verbose:
153
+ verbose_print(f"Executed {function_name} with parameters {function_params}.", "info")
154
+
155
+ return tool_agent_response
156
+ else:
157
+ if self.verbose:
158
+ verbose_print("No action JSON found; returning LLM response directly.", "info")
159
+ return ToolCallingAgentResponse(user_query=user_request, agent_output=agent_response)