praisonaiagents-0.0.20-py3-none-any.whl → praisonaiagents-0.0.22-py3-none-any.whl

praisonaiagents/__init__.py
@@ -5,6 +5,7 @@ Praison AI Agents - A package for hierarchical AI agent task execution
  from .agent.agent import Agent
  from .agents.agents import PraisonAIAgents
  from .task.task import Task
+ from .tools.tools import Tools
  from .main import (
      TaskOutput,
      ReflectionOutput,
@@ -21,9 +22,14 @@ from .main import (
      async_display_callbacks,
  )

+ # Add Agents as an alias for PraisonAIAgents
+ Agents = PraisonAIAgents
+
  __all__ = [
      'Agent',
      'PraisonAIAgents',
+     'Agents',
+     'Tools',
      'Task',
      'TaskOutput',
      'ReflectionOutput',
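
Taken together, the new Tools export and the Agents alias give a shorter top-level API. A minimal usage sketch (the instruction strings are placeholders, and an OpenAI-compatible API key is assumed to be configured in the environment):

    from praisonaiagents import Agent, Agents, Tools

    # Instruction-only agents (introduced in the agent.py changes below) plus the Agents alias.
    researcher = Agent(instructions="Find recent news about AI agent frameworks")
    writer = Agent(instructions="Turn the findings into a short summary")

    # Agents is simply another name for PraisonAIAgents.
    workflow = Agents(agents=[researcher, writer])
    workflow.start()  # assumed entry point; the start/run API itself is unchanged by this diff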
praisonaiagents/agent/agent.py
@@ -3,7 +3,7 @@ import time
  import json
  import logging
  import asyncio
- from typing import List, Optional, Any, Dict, Union, Literal
+ from typing import List, Optional, Any, Dict, Union, Literal, TYPE_CHECKING
  from rich.console import Console
  from rich.live import Live
  from openai import AsyncOpenAI
@@ -19,6 +19,9 @@ from ..main import (
      error_logs
  )

+ if TYPE_CHECKING:
+     from ..task.task import Task
+
  class Agent:
      def _generate_tool_definition(self, function_name):
          """
@@ -132,11 +135,12 @@ class Agent:

      def __init__(
          self,
-         name: str,
-         role: str,
-         goal: str,
-         backstory: str,
-         llm: Optional[Union[str, Any]] = "gpt-4o",
+         name: Optional[str] = None,
+         role: Optional[str] = None,
+         goal: Optional[str] = None,
+         backstory: Optional[str] = None,
+         instructions: Optional[str] = None,
+         llm: Optional[Union[str, Any]] = None,
          tools: Optional[List[Any]] = None,
          function_calling_llm: Optional[Any] = None,
          max_iter: int = 20,
@@ -158,16 +162,35 @@ class Agent:
          knowledge_sources: Optional[List[Any]] = None,
          use_system_prompt: Optional[bool] = True,
          markdown: bool = True,
-         self_reflect: bool = True,
+         self_reflect: Optional[bool] = None,
          max_reflect: int = 3,
          min_reflect: int = 1,
          reflect_llm: Optional[str] = None
      ):
-         self.name = name
-         self.role = role
-         self.goal = goal
-         self.backstory = backstory
-         self.llm = llm
+         # Handle backward compatibility for required fields
+         if all(x is None for x in [name, role, goal, backstory, instructions]):
+             raise ValueError("At least one of name, role, goal, backstory, or instructions must be provided")
+
+         # If instructions are provided, use them to set role, goal, and backstory
+         if instructions:
+             self.name = name or "Agent"
+             self.role = role or "Assistant"
+             self.goal = goal or instructions
+             self.backstory = backstory or instructions
+             # Set self_reflect to False by default for instruction-based agents
+             self.self_reflect = False if self_reflect is None else self_reflect
+         else:
+             # Use provided values or defaults
+             self.name = name or "Agent"
+             self.role = role or "Assistant"
+             self.goal = goal or "Help the user with their tasks"
+             self.backstory = backstory or "I am an AI assistant"
+             # Default to True for traditional agents if not specified
+             self.self_reflect = True if self_reflect is None else self_reflect
+
+         self.instructions = instructions
+         # Check for model name in environment variable if not provided
+         self.llm = llm or os.getenv('OPENAI_MODEL_NAME', 'gpt-4o')
          self.tools = tools if tools else [] # Store original tools
          self.function_calling_llm = function_calling_llm
          self.max_iter = max_iter
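
The net effect is that Agent now accepts either the traditional role/goal/backstory fields or a single instructions string. A short sketch of both construction styles (the strings are placeholders):

    from praisonaiagents import Agent

    # Instruction-based style: role/goal/backstory are derived from instructions,
    # and self_reflect defaults to False on this path.
    quick = Agent(instructions="Summarise the provided text in three bullet points")

    # Traditional style keeps working; self_reflect defaults to True here.
    classic = Agent(
        name="Analyst",
        role="Data Analyst",
        goal="Explain trends in the provided data",
        backstory="An experienced analyst who writes concise reports",
    )

    # Calling Agent() with none of name/role/goal/backstory/instructions raises ValueError.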
@@ -190,10 +213,10 @@ class Agent:
          self.use_system_prompt = use_system_prompt
          self.chat_history = []
          self.markdown = markdown
-         self.self_reflect = self_reflect
          self.max_reflect = max_reflect
          self.min_reflect = min_reflect
-         self.reflect_llm = reflect_llm
+         # Use the same model selection logic for reflect_llm
+         self.reflect_llm = reflect_llm or os.getenv('OPENAI_MODEL_NAME', 'gpt-4o')
          self.console = Console() # Create a single console instance for the agent

          # Initialize system prompt
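
With both llm and reflect_llm now falling back to the OPENAI_MODEL_NAME environment variable (and finally to "gpt-4o"), model selection can be driven entirely from the environment; a small sketch:

    import os
    from praisonaiagents import Agent

    os.environ["OPENAI_MODEL_NAME"] = "gpt-4o-mini"  # example value

    agent = Agent(instructions="Answer questions about Python packaging")
    print(agent.llm, agent.reflect_llm)  # both resolve to "gpt-4o-mini"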
@@ -202,6 +225,21 @@ Your Role: {self.role}\n
  Your Goal: {self.goal}
  """

+     def generate_task(self) -> 'Task':
+         """Generate a Task object from the agent's instructions"""
+         from ..task.task import Task
+
+         description = self.instructions if self.instructions else f"Execute task as {self.role} with goal: {self.goal}"
+         expected_output = "Complete the assigned task successfully"
+
+         return Task(
+             name=self.name,
+             description=description,
+             expected_output=expected_output,
+             agent=self,
+             tools=self.tools
+         )
+
      def execute_tool(self, function_name, arguments):
          """
          Execute a tool dynamically based on the function name and arguments.
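
generate_task is what lets the agents container below run without an explicit task list; roughly, each agent can be turned into a one-off Task like this (the instruction string is a placeholder):

    from praisonaiagents import Agent

    agent = Agent(instructions="Collect three recent headlines about open-source LLMs")
    task = agent.generate_task()

    # The Task mirrors the agent: its name, a description taken from the
    # instructions (or role/goal), a generic expected_output, and the agent's tools.
    print(task.name, "->", task.description)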

praisonaiagents/agents/agents.py
@@ -41,7 +41,10 @@ def process_video(video_path: str, seconds_per_frame=2):
      return base64_frames

  class PraisonAIAgents:
-     def __init__(self, agents, tasks, verbose=0, completion_checker=None, max_retries=5, process="sequential", manager_llm=None):
+     def __init__(self, agents, tasks=None, verbose=0, completion_checker=None, max_retries=5, process="sequential", manager_llm=None):
+         if not agents:
+             raise ValueError("At least one agent must be provided")
+
          self.agents = agents
          self.tasks = {}
          if max_retries < 3:
@@ -51,12 +54,41 @@ class PraisonAIAgents:
          self.verbose = verbose
          self.max_retries = max_retries
          self.process = process
-         if not manager_llm:
-             logging.debug("No manager_llm provided. Using OPENAI_MODEL_NAME environment variable or defaulting to 'gpt-4o'")
-         self.manager_llm = manager_llm if manager_llm else os.getenv('OPENAI_MODEL_NAME', 'gpt-4o')
+
+         # Check for manager_llm in environment variable if not provided
+         self.manager_llm = manager_llm or os.getenv('OPENAI_MODEL_NAME', 'gpt-4o')
+         if self.verbose:
+             logging.info(f"Using model {self.manager_llm} for manager")
+
+         # If no tasks provided, generate them from agents
+         if tasks is None:
+             tasks = []
+             for agent in self.agents:
+                 task = agent.generate_task()
+                 tasks.append(task)
+             logging.info(f"Auto-generated {len(tasks)} tasks from agents")
+         else:
+             # Validate tasks for backward compatibility
+             if not tasks:
+                 raise ValueError("If tasks are provided, at least one task must be present")
+             logging.info(f"Using {len(tasks)} provided tasks")
+
+         # Add tasks and set their status
          for task in tasks:
              self.add_task(task)
              task.status = "not started"
+
+         # If tasks were auto-generated from agents or process is sequential, set up sequential flow
+         if len(tasks) > 1 and (process == "sequential" or all(task.next_tasks == [] for task in tasks)):
+             for i in range(len(tasks) - 1):
+                 # Set up next task relationship
+                 tasks[i].next_tasks = [tasks[i + 1].name]
+                 # Set up context for the next task to include the current task
+                 if tasks[i + 1].context is None:
+                     tasks[i + 1].context = []
+                 tasks[i + 1].context.append(tasks[i])
+             logging.info("Set up sequential flow with automatic context passing")
+
          self._state = {} # Add state storage at PraisonAIAgents level

      def add_task(self, task):
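
Put together, tasks becomes optional: one task is auto-generated per agent and, for the default sequential process, each task's next_tasks and context are wired so later steps see earlier results. A minimal sketch (instruction strings are placeholders; execution is then kicked off through the existing entry point, which this diff does not change):

    from praisonaiagents import Agent, PraisonAIAgents

    research = Agent(instructions="Find two recent papers on multi-agent frameworks")
    summarise = Agent(instructions="Summarise the papers found in the previous step")

    # No tasks argument: one task per agent is generated via generate_task() and
    # chained sequentially, with each task added to the next task's context.
    crew = PraisonAIAgents(agents=[research, summarise])

    # PraisonAIAgents(agents=[]) now raises ValueError, and an explicit empty
    # tasks list ([]) is also rejected.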
@@ -119,9 +151,9 @@ Expected Output: {task.expected_output}.
              else:
                  context_results += f"Previous task {context_task.name if context_task.name else context_task.description} had no result.\n"
          task_prompt += f"""
- Here are the results of previous tasks that might be useful:\n
- {context_results}
- """
+ Here are the results of previous tasks that might be useful:\n
+ {context_results}
+ """
          task_prompt += "Please provide only the final result of your work. Do not add any conversation or extra explanation."

          if self.verbose >= 2:
@@ -320,7 +352,7 @@ Expected Output: {task.expected_output}.
          task_prompt = f"""
  You need to do the following task: {task.description}.
  Expected Output: {task.expected_output}.
- """
+ """
          if task.context:
              context_results = ""
              for context_task in task.context:
@@ -329,9 +361,9 @@ Expected Output: {task.expected_output}.
              else:
                  context_results += f"Previous task {context_task.name if context_task.name else context_task.description} had no result.\n"
          task_prompt += f"""
- Here are the results of previous tasks that might be useful:\n
- {context_results}
- """
+ Here are the results of previous tasks that might be useful:\n
+ {context_results}
+ """
          task_prompt += "Please provide only the final result of your work. Do not add any conversation or extra explanation."

          if self.verbose >= 2:

praisonaiagents/tools/__init__.py (new file)
@@ -0,0 +1,4 @@
+ """Tools package for PraisonAI Agents"""
+ from .tools import Tools
+
+ __all__ = ['Tools']

praisonaiagents/tools/tools.py (new file)
@@ -0,0 +1,40 @@
+ """Tools module for PraisonAI Agents"""
+ from typing import List, Dict
+ import logging
+ import importlib
+
+ class Tools:
+     @staticmethod
+     def internet_search(query: str) -> List[Dict]:
+         """
+         Perform a search using DuckDuckGo.
+
+         Args:
+             query (str): The search query.
+
+         Returns:
+             list: A list of search result titles, URLs, and snippets.
+         """
+         # Check if duckduckgo_search is installed
+         if importlib.util.find_spec("duckduckgo_search") is None:
+             error_msg = "DuckDuckGo search is not available. Please install duckduckgo_search package using: pip install duckduckgo_search"
+             logging.error(error_msg)
+             return [{"error": error_msg}]
+
+         try:
+             # Import only when needed
+             from duckduckgo_search import DDGS
+             results = []
+             ddgs = DDGS()
+             for result in ddgs.text(keywords=query, max_results=5):
+                 results.append({
+                     "title": result.get("title", ""),
+                     "url": result.get("href", ""),
+                     "snippet": result.get("body", "")
+                 })
+             return results
+
+         except Exception as e:
+             error_msg = f"Error during DuckDuckGo search: {e}"
+             logging.error(error_msg)
+             return [{"error": error_msg}]
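
A usage sketch for the new Tools.internet_search helper; it degrades gracefully when the optional duckduckgo_search dependency is missing by returning a single error entry instead of raising:

    from praisonaiagents import Tools

    # Optional dependency: pip install duckduckgo_search
    results = Tools.internet_search("PraisonAI agents")

    for item in results:
        if "error" in item:
            print("Search unavailable:", item["error"])
        else:
            print(item["title"], item["url"], item["snippet"][:80])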

praisonaiagents-0.0.22.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: praisonaiagents
- Version: 0.0.20
+ Version: 0.0.22
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Dist: pydantic

praisonaiagents-0.0.22.dist-info/RECORD
@@ -1,9 +1,9 @@
- praisonaiagents/__init__.py,sha256=KKB8sfpTh1Lf0gz9ULe6a0sA2JpGqOevH80RpM8p0oM,923
+ praisonaiagents/__init__.py,sha256=xJLN8i6V9SRmJFMxSRWDQt_hBePoupVd3WanNIgbBbc,1052
  praisonaiagents/main.py,sha256=7Phfe0gdxHzbhPb3WRzBTfq9CaLq0K31M5DM_4oCiCQ,12451
  praisonaiagents/agent/__init__.py,sha256=sKO8wGEXvtCrvV1e834r1Okv0XAqAxqZCqz6hKLiTvA,79
- praisonaiagents/agent/agent.py,sha256=_UmUWGbZjd3tApPX2T6RPB5Pll3Gos97XBhhg_zmfn8,30662
+ praisonaiagents/agent/agent.py,sha256=E4Kb-U8IDhlvn0F4-oeSEof3_vuu4FDsQmoPZSdYUaE,32635
  praisonaiagents/agents/__init__.py,sha256=7RDeQNSqZg5uBjD4M_0p_F6YgfWuDuxPFydPU50kDYc,120
- praisonaiagents/agents/agents.py,sha256=P8JJ1849-djMDkMuP0kNhPwtg97L8gO60jYXzXFcPc0,21762
+ praisonaiagents/agents/agents.py,sha256=Q2skltMf0xYBImpEW528_Kv2ClnLDyTy3yIzS24hGbY,23097
  praisonaiagents/build/lib/praisonaiagents/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
  praisonaiagents/build/lib/praisonaiagents/main.py,sha256=zDhN5KKtKbfruolDNxlyJkcFlkSt4KQkQTDRfQVAhxc,3960
  praisonaiagents/build/lib/praisonaiagents/agent/__init__.py,sha256=sKO8wGEXvtCrvV1e834r1Okv0XAqAxqZCqz6hKLiTvA,79
@@ -16,7 +16,9 @@ praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0
  praisonaiagents/process/process.py,sha256=4qXdrCDQPH5MtvHvdJVURXKNgSl6ae3OYTiqAF_A2ZU,24295
  praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
  praisonaiagents/task/task.py,sha256=UiiWgLDOdX_w0opP8h8-u-leVZlq1CkpGUmf7L2qyJs,3110
- praisonaiagents-0.0.20.dist-info/METADATA,sha256=HEoTvC97N36YxNBbb7VEJgB7tWZCvGOR-tpcAYMwEus,233
- praisonaiagents-0.0.20.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
- praisonaiagents-0.0.20.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
- praisonaiagents-0.0.20.dist-info/RECORD,,
+ praisonaiagents/tools/__init__.py,sha256=rFen7LHI55mxBVW2raQmqKnK_JFuvTVVAuUmrQpDg_c,87
+ praisonaiagents/tools/tools.py,sha256=fs0fjW7Y_k9qsaR0CmlPjayiiVrDxDmgfr5SwR1Tdck,1363
+ praisonaiagents-0.0.22.dist-info/METADATA,sha256=CSsaNWWddnP3xTnXv8OTlglEHBX9S3CZeWJZEw_yTIM,233
+ praisonaiagents-0.0.22.dist-info/WHEEL,sha256=A3WOREP4zgxI0fKrHUG8DC8013e3dK3n7a6HDbcEIwE,91
+ praisonaiagents-0.0.22.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+ praisonaiagents-0.0.22.dist-info/RECORD,,

praisonaiagents-0.0.22.dist-info/WHEEL
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (75.6.0)
+ Generator: setuptools (75.7.0)
  Root-Is-Purelib: true
  Tag: py3-none-any