praisonaiagents 0.0.6__tar.gz → 0.0.8__tar.gz

Files changed (24)
  1. {praisonaiagents-0.0.6 → praisonaiagents-0.0.8}/PKG-INFO +1 -1
  2. {praisonaiagents-0.0.6 → praisonaiagents-0.0.8}/praisonaiagents/agent/agent.py +140 -67
  3. {praisonaiagents-0.0.6 → praisonaiagents-0.0.8}/praisonaiagents.egg-info/PKG-INFO +1 -1
  4. {praisonaiagents-0.0.6 → praisonaiagents-0.0.8}/pyproject.toml +1 -1
  5. {praisonaiagents-0.0.6 → praisonaiagents-0.0.8}/praisonaiagents/__init__.py +0 -0
  6. {praisonaiagents-0.0.6 → praisonaiagents-0.0.8}/praisonaiagents/agent/__init__.py +0 -0
  7. {praisonaiagents-0.0.6 → praisonaiagents-0.0.8}/praisonaiagents/agents/__init__.py +0 -0
  8. {praisonaiagents-0.0.6 → praisonaiagents-0.0.8}/praisonaiagents/agents/agents.py +0 -0
  9. {praisonaiagents-0.0.6 → praisonaiagents-0.0.8}/praisonaiagents/build/lib/praisonaiagents/__init__.py +0 -0
  10. {praisonaiagents-0.0.6 → praisonaiagents-0.0.8}/praisonaiagents/build/lib/praisonaiagents/agent/__init__.py +0 -0
  11. {praisonaiagents-0.0.6 → praisonaiagents-0.0.8}/praisonaiagents/build/lib/praisonaiagents/agent/agent.py +0 -0
  12. {praisonaiagents-0.0.6 → praisonaiagents-0.0.8}/praisonaiagents/build/lib/praisonaiagents/agents/__init__.py +0 -0
  13. {praisonaiagents-0.0.6 → praisonaiagents-0.0.8}/praisonaiagents/build/lib/praisonaiagents/agents/agents.py +0 -0
  14. {praisonaiagents-0.0.6 → praisonaiagents-0.0.8}/praisonaiagents/build/lib/praisonaiagents/main.py +0 -0
  15. {praisonaiagents-0.0.6 → praisonaiagents-0.0.8}/praisonaiagents/build/lib/praisonaiagents/task/__init__.py +0 -0
  16. {praisonaiagents-0.0.6 → praisonaiagents-0.0.8}/praisonaiagents/build/lib/praisonaiagents/task/task.py +0 -0
  17. {praisonaiagents-0.0.6 → praisonaiagents-0.0.8}/praisonaiagents/main.py +0 -0
  18. {praisonaiagents-0.0.6 → praisonaiagents-0.0.8}/praisonaiagents/task/__init__.py +0 -0
  19. {praisonaiagents-0.0.6 → praisonaiagents-0.0.8}/praisonaiagents/task/task.py +0 -0
  20. {praisonaiagents-0.0.6 → praisonaiagents-0.0.8}/praisonaiagents.egg-info/SOURCES.txt +0 -0
  21. {praisonaiagents-0.0.6 → praisonaiagents-0.0.8}/praisonaiagents.egg-info/dependency_links.txt +0 -0
  22. {praisonaiagents-0.0.6 → praisonaiagents-0.0.8}/praisonaiagents.egg-info/requires.txt +0 -0
  23. {praisonaiagents-0.0.6 → praisonaiagents-0.0.8}/praisonaiagents.egg-info/top_level.txt +0 -0
  24. {praisonaiagents-0.0.6 → praisonaiagents-0.0.8}/setup.cfg +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: praisonaiagents
-Version: 0.0.6
+Version: 0.0.8
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Dist: pydantic
praisonaiagents/agent/agent.py
@@ -17,13 +17,92 @@ from ..main import (
 )

 class Agent:
+    def _generate_tool_definition(self, function_name):
+        """
+        Generate a tool definition from a function name by inspecting the function.
+        """
+        # First try to get the tool definition if it exists
+        tool_def_name = f"{function_name}_definition"
+        tool_def = globals().get(tool_def_name)
+        if not tool_def:
+            import __main__
+            tool_def = getattr(__main__, tool_def_name, None)
+
+        if tool_def:
+            return tool_def
+
+        # If no definition exists, try to generate one from the function
+        func = globals().get(function_name)
+        if not func:
+            import __main__
+            func = getattr(__main__, function_name, None)
+
+        if not func or not callable(func):
+            return None
+
+        import inspect
+        sig = inspect.signature(func)
+        parameters = {
+            "type": "object",
+            "properties": {},
+            "required": []
+        }
+
+        # Parse docstring for parameter descriptions
+        docstring = inspect.getdoc(func)
+        param_descriptions = {}
+        if docstring:
+            import re
+            param_section = re.split(r'\s*Args:\s*', docstring)
+            if len(param_section) > 1:
+                param_lines = param_section[1].split('\n')
+                for line in param_lines:
+                    line = line.strip()
+                    if line and ':' in line:
+                        param_name, param_desc = line.split(':', 1)
+                        param_descriptions[param_name.strip()] = param_desc.strip()
+
+        for name, param in sig.parameters.items():
+            param_type = "string"  # Default type
+            if param.annotation != inspect.Parameter.empty:
+                if param.annotation == int:
+                    param_type = "integer"
+                elif param.annotation == float:
+                    param_type = "number"
+                elif param.annotation == bool:
+                    param_type = "boolean"
+                elif param.annotation == list:
+                    param_type = "array"
+                elif param.annotation == dict:
+                    param_type = "object"
+
+            param_info = {"type": param_type}
+            if name in param_descriptions:
+                param_info["description"] = param_descriptions[name]
+
+            parameters["properties"][name] = param_info
+            if param.default == inspect.Parameter.empty:
+                parameters["required"].append(name)
+
+        # Extract description from docstring
+        description = docstring.split('\n')[0] if docstring else f"Function {function_name}"
+
+        return {
+            "type": "function",
+            "function": {
+                "name": function_name,
+                "description": description,
+                "parameters": parameters
+            }
+        }
+
     def __init__(
         self,
         name: str,
         role: str,
         goal: str,
         backstory: str,
-        llm: Optional[Union[str, Any]] = "gpt-4o-mini",
+        llm: Optional[Union[str, Any]] = "gpt-4o",
         tools: Optional[List[Any]] = None,
         function_calling_llm: Optional[Any] = None,
         max_iter: int = 20,
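For illustration, here is the sort of user-defined function the new _generate_tool_definition is built to introspect, together with roughly the OpenAI-style tool schema it would derive from the signature and the "Args:" section of the docstring. The function name get_stock_price and its docstring are hypothetical examples, not part of the package.

def get_stock_price(symbol: str, days: int = 7):
    """
    Fetch a recent closing price for a stock symbol.

    Args:
        symbol: Ticker symbol to look up
        days: Number of days of history to consider
    """
    ...

# Sketch of the definition _generate_tool_definition would build for this
# function: types come from the annotations, descriptions from the Args
# section, and only parameters without defaults are marked required.
expected_definition = {
    "type": "function",
    "function": {
        "name": "get_stock_price",
        "description": "Fetch a recent closing price for a stock symbol.",
        "parameters": {
            "type": "object",
            "properties": {
                "symbol": {"type": "string", "description": "Ticker symbol to look up"},
                "days": {"type": "integer", "description": "Number of days of history to consider"},
            },
            "required": ["symbol"],
        },
    },
}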
@@ -46,14 +125,16 @@ class Agent:
         use_system_prompt: Optional[bool] = True,
         markdown: bool = True,
         self_reflect: bool = True,
-        max_reflection_iter: int = 3
+        max_reflect: int = 3,
+        min_reflect: int = 1,
+        reflect_llm: Optional[str] = None
     ):
         self.name = name
         self.role = role
         self.goal = goal
         self.backstory = backstory
         self.llm = llm
-        self.tools = tools if tools else []
+        self.tools = tools if tools else []  # Store original tools
         self.function_calling_llm = function_calling_llm
         self.max_iter = max_iter
         self.max_rpm = max_rpm
@@ -76,21 +157,29 @@ class Agent:
         self.chat_history = []
         self.markdown = markdown
         self.self_reflect = self_reflect
-        self.max_reflection_iter = max_reflection_iter
-
+        self.max_reflect = max_reflect
+        self.min_reflect = min_reflect
+        self.reflect_llm = reflect_llm
     def execute_tool(self, function_name, arguments):
+        """
+        Execute a tool dynamically based on the function name and arguments.
+        """
         logging.debug(f"{self.name} executing tool {function_name} with arguments: {arguments}")
-        if function_name == "get_weather":
-            location = arguments.get("location", "Unknown Location")
-            return {"temperature": "25C", "condition": "Sunny", "location": location}
-        elif function_name == "search_tool":
-            query = arguments.get("query", "AI trends in 2024")
-            return {"results": [
-                {"title": "AI advancements in 2024", "link": "url1", "summary": "Lots of advancements"},
-                {"title": "New trends in AI", "link": "url2", "summary": "New trends being found"}
-            ]}
-        else:
-            return f"Tool '{function_name}' is not recognized"
+
+        # Try to get the function from globals first
+        func = globals().get(function_name)
+        if not func:
+            # Then try to get from the main module
+            import __main__
+            func = getattr(__main__, function_name, None)
+
+        if func and callable(func):
+            try:
+                return func(**arguments)
+            except Exception as e:
+                return {"error": str(e)}
+
+        return {"error": f"Tool '{function_name}' is not callable"}

     def clear_history(self):
         self.chat_history = []
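As a hedged illustration of the new dispatch behaviour, the sketch below mirrors what execute_tool now does internally: resolve the function by name from the module globals or from __main__, call it with the parsed arguments as keyword arguments, and fall back to an error dict. The get_weather function here is a hypothetical user-defined tool; the package no longer hard-codes one.

import __main__

def get_weather(location: str):
    """Return a canned weather report for a location (stand-in for a real tool)."""
    return {"location": location, "temperature": "25C", "condition": "Sunny"}

function_name = "get_weather"
arguments = {"location": "Paris"}

# Roughly the lookup-and-call sequence execute_tool performs for any tool name.
func = globals().get(function_name) or getattr(__main__, function_name, None)
if func and callable(func):
    try:
        result = func(**arguments)
    except Exception as e:
        result = {"error": str(e)}
else:
    result = {"error": f"Tool '{function_name}' is not callable"}

print(result)  # {'location': 'Paris', 'temperature': '25C', 'condition': 'Sunny'}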
@@ -104,26 +193,25 @@ class Agent:
         logging.debug(f"{self.name} sending messages to LLM: {messages}")

         formatted_tools = []
+        if tools is None:
+            tools = self.tools
         if tools:
             for tool in tools:
-                if isinstance(tool, dict):
+                if isinstance(tool, str):
+                    # Generate tool definition for string tool names
+                    tool_def = self._generate_tool_definition(tool)
+                    if tool_def:
+                        formatted_tools.append(tool_def)
+                    else:
+                        logging.warning(f"Could not generate definition for tool: {tool}")
+                elif isinstance(tool, dict):
                     formatted_tools.append(tool)
                 elif hasattr(tool, "to_openai_tool"):
                     formatted_tools.append(tool.to_openai_tool())
-                elif isinstance(tool, str):
-                    formatted_tools.append({
-                        "type": "function",
-                        "function": {
-                            "name": tool,
-                            "description": f"This is a tool called {tool}",
-                            "parameters": {
-                                "type": "object",
-                                "properties": {},
-                            },
-                        }
-                    })
+                elif callable(tool):
+                    formatted_tools.append(self._generate_tool_definition(tool.__name__))
                 else:
-                    display_error(f"Warning: Tool {tool} not recognized")
+                    logging.warning(f"Tool {tool} not recognized")

         try:
             initial_response = client.chat.completions.create(
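A hedged sketch of the tool forms that branch now accepts when constructing an agent: plain strings and callables are turned into OpenAI-style definitions via _generate_tool_definition, while dicts and objects exposing to_openai_tool() pass through unchanged. The import path mirrors the module layout in the file list above; search_web is a hypothetical user function, and actually running the agent would need a configured OpenAI client.

from praisonaiagents.agent.agent import Agent

def search_web(query: str):
    """
    Search the web for a query.

    Args:
        query: Text to search for
    """
    return {"results": [f"placeholder result for {query}"]}

agent = Agent(
    name="Researcher",
    role="Research Assistant",
    goal="Answer questions using user-defined tools",
    backstory="An agent that can call functions defined in the calling script",
    tools=[
        search_web,  # callable: definition generated from its signature and docstring
        # "search_web" as a plain string, dicts in OpenAI tool format, and objects
        # exposing .to_openai_tool() are also accepted by the same branch.
    ],
)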
@@ -223,29 +311,7 @@ class Agent:
         if self.verbose:
             display_instruction(f"Agent {self.name} is processing prompt: {prompt}")

-        formatted_tools = []
-        if tools:
-            for tool in tools:
-                if isinstance(tool, dict):
-                    formatted_tools.append(tool)
-                elif hasattr(tool, "to_openai_tool"):
-                    formatted_tools.append(tool.to_openai_tool())
-                elif isinstance(tool, str):
-                    formatted_tools.append({
-                        "type": "function",
-                        "function": {
-                            "name": tool,
-                            "description": f"This is a tool called {tool}",
-                            "parameters": {
-                                "type": "object",
-                                "properties": {},
-                            },
-                        }
-                    })
-                else:
-                    display_error(f"Warning: Tool {tool} not recognized")
-
-        response = self._chat_completion(messages, temperature=temperature, tools=formatted_tools if formatted_tools else None)
+        response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None)
         if not response:
             return None

@@ -308,7 +374,7 @@ class Agent:

                 try:
                     reflection_response = client.beta.chat.completions.parse(
-                        model=self.llm,
+                        model=self.reflect_llm if self.reflect_llm else self.llm,
                         messages=messages,
                         temperature=temperature,
                         response_format=ReflectionOutput
@@ -317,35 +383,42 @@ class Agent:
                     reflection_output = reflection_response.choices[0].message.parsed

                     if self.verbose:
-                        display_self_reflection(f"Agent {self.name} self reflection: reflection='{reflection_output.reflection}' satisfactory='{reflection_output.satisfactory}'")
+                        display_self_reflection(f"Agent {self.name} self reflection (using {self.reflect_llm if self.reflect_llm else self.llm}): reflection='{reflection_output.reflection}' satisfactory='{reflection_output.satisfactory}'")

                     messages.append({"role": "assistant", "content": f"Self Reflection: {reflection_output.reflection} Satisfactory?: {reflection_output.satisfactory}"})

-                    if reflection_output.satisfactory == "yes":
+                    # Only consider satisfactory after minimum reflections
+                    if reflection_output.satisfactory == "yes" and reflection_count >= self.min_reflect - 1:
+                        if self.verbose:
+                            display_self_reflection("Agent marked the response as satisfactory after meeting minimum reflections")
+                        self.chat_history.append({"role": "user", "content": prompt})
+                        self.chat_history.append({"role": "assistant", "content": response_text})
+                        display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time)
+                        return response_text
+
+                    # Check if we've hit max reflections
+                    if reflection_count >= self.max_reflect - 1:
                         if self.verbose:
-                            display_self_reflection("Agent marked the response as satisfactory")
+                            display_self_reflection("Maximum reflection count reached, returning current response")
+                        self.chat_history.append({"role": "user", "content": prompt})
                         self.chat_history.append({"role": "assistant", "content": response_text})
                         display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time)
                         return response_text

-                    logging.debug(f"{self.name} reflection not satisfactory, requesting regeneration.")
+                    logging.debug(f"{self.name} reflection count {reflection_count + 1}, continuing reflection process")
                     messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
                     response = self._chat_completion(messages, temperature=temperature, tools=None, stream=True)
                     response_text = response.choices[0].message.content.strip()
+                    reflection_count += 1
+                    continue  # Continue the loop for more reflections
+
                 except Exception as e:
                     display_error(f"Error in parsing self-reflection json {e}. Retrying")
                     logging.error("Reflection parsing failed.", exc_info=True)
                     messages.append({"role": "assistant", "content": f"Self Reflection failed."})
+                    reflection_count += 1
+                    continue  # Continue even after error to try again

-                reflection_count += 1
-
-            self.chat_history.append({"role": "user", "content": prompt})
-            self.chat_history.append({"role": "assistant", "content": response_text})
-
-            if self.verbose:
-                logging.info(f"Agent {self.name} final response: {response_text}")
-            display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time)
-            return response_text
         except Exception as e:
             display_error(f"Error in chat: {e}")
             return None
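Taken together, the reflection changes mean the loop now runs at least min_reflect and at most max_reflect passes, and can delegate the critique step to a separate reflect_llm. A minimal usage sketch follows; the chat entry point and the model names are assumptions (the diff shows the reflection loop but not the public method signature), so treat it as illustrative rather than definitive.

from praisonaiagents.agent.agent import Agent

agent = Agent(
    name="Writer",
    role="Technical Writer",
    goal="Produce a concise, accurate summary",
    backstory="An agent that critiques and revises its own drafts",
    llm="gpt-4o",               # new default model in 0.0.8 (was gpt-4o-mini)
    self_reflect=True,
    min_reflect=1,              # a "satisfactory" verdict counts only from the first reflection onward
    max_reflect=3,              # return the current draft once three reflections have run
    reflect_llm="gpt-4o-mini",  # optional separate model used just for the reflection step
)

# Assumed entry point: the reflection loop above runs inside the chat flow.
print(agent.chat("Summarise the difference between versions 0.0.6 and 0.0.8"))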
praisonaiagents.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: praisonaiagents
-Version: 0.0.6
+Version: 0.0.8
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Dist: pydantic
pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

 [project]
 name = "praisonaiagents"
-version = "0.0.6"
+version = "0.0.8"
 description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
 authors = [
     { name="Mervin Praison" }