praisonaiagents 0.0.3__py3-none-any.whl → 0.0.5__py3-none-any.whl

--- praisonaiagents/__init__.py (0.0.3)
+++ praisonaiagents/__init__.py (0.0.5)
@@ -3,7 +3,7 @@ Praison AI Agents - A package for hierarchical AI agent task execution
 """
 
 from .agent.agent import Agent
-from .agents.agents import Agents
+from .agents.agents import PraisonAIAgents
 from .task.task import Task
 from .main import (
     TaskOutput,
@@ -20,7 +20,7 @@ from .main import (
 
 __all__ = [
     'Agent',
-    'Agents',
+    'PraisonAIAgents',
     'Task',
     'TaskOutput',
     'ReflectionOutput',
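
The user-facing change here is the rename of the multi-agent orchestrator from `Agents` to `PraisonAIAgents`. A minimal migration sketch for calling code (the agent and task values are illustrative, and `OPENAI_API_KEY` must be set because the package creates an OpenAI client at import time):

```python
# 0.0.3: from praisonaiagents import Agent, Agents, Task
# 0.0.5: only the orchestrator's class name changes
from praisonaiagents import Agent, PraisonAIAgents, Task

researcher = Agent(name="Researcher", role="Analyst",
                   goal="Summarise AI trends", backstory="A thorough analyst")
research = Task(description="List three AI trends of 2024",
                expected_output="Three bullet points", agent=researcher)

crew = PraisonAIAgents(agents=[researcher], tasks=[research])  # was: Agents(...)
```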

--- praisonaiagents/agents/__init__.py (0.0.3)
+++ praisonaiagents/agents/__init__.py (0.0.5)
@@ -1,4 +1,4 @@
 """Agents module for managing multiple AI agents"""
-from .agents import Agents
+from .agents import PraisonAIAgents
 
-__all__ = ['Agents']
+__all__ = ['PraisonAIAgents']

--- praisonaiagents/agents/agents.py (0.0.3)
+++ praisonaiagents/agents/agents.py (0.0.5)
@@ -11,7 +11,7 @@ from ..main import display_error, TaskOutput, error_logs, client
 from ..agent.agent import Agent
 from ..task.task import Task
 
-class Agents:
+class PraisonAIAgents:
     def __init__(self, agents, tasks, verbose=0, completion_checker=None, max_retries=5, process="sequential", manager_llm=None):
         self.agents = agents
         self.tasks = {}
@@ -22,7 +22,7 @@ class Agents:
         self.verbose = verbose
         self.max_retries = max_retries
         self.process = process
-        self.manager_llm = manager_llm
+        self.manager_llm = manager_llm if manager_llm else os.getenv('OPENAI_MODEL_NAME', 'gpt-4o')
         for task in tasks:
             self.add_task(task)
             task.status = "not started"
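
The second change gives the hierarchical manager a model default: an explicit `manager_llm` argument wins, then the `OPENAI_MODEL_NAME` environment variable, then `gpt-4o`. A sketch of that resolution order (`resolve_manager_llm` is an illustrative helper, not part of the package):

```python
import os

def resolve_manager_llm(manager_llm=None):
    # Mirrors the 0.0.5 default shown in the hunk above.
    return manager_llm if manager_llm else os.getenv("OPENAI_MODEL_NAME", "gpt-4o")

os.environ.pop("OPENAI_MODEL_NAME", None)
print(resolve_manager_llm())              # gpt-4o
os.environ["OPENAI_MODEL_NAME"] = "gpt-4o-mini"
print(resolve_manager_llm())              # gpt-4o-mini
print(resolve_manager_llm("o1-mini"))     # o1-mini (explicit argument wins)
```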

--- /dev/null
+++ praisonaiagents/build/lib/praisonaiagents/agent/__init__.py
@@ -0,0 +1,4 @@
+"""Agent module for AI agents"""
+from .agent import Agent
+
+__all__ = ['Agent']

--- /dev/null
+++ praisonaiagents/build/lib/praisonaiagents/agent/agent.py
@@ -0,0 +1,350 @@
+import logging
+import json
+import time
+from typing import List, Optional, Any, Dict, Union, Literal
+from rich.console import Console
+from rich.live import Live
+from ..main import (
+    display_error,
+    display_tool_call,
+    display_instruction,
+    display_interaction,
+    display_generating,
+    display_self_reflection,  # fix: used in chat() below but missing from the original import list
+    ReflectionOutput,
+    client,
+    error_logs
+)
+
+class Agent:
+    def __init__(
+        self,
+        name: str,
+        role: str,
+        goal: str,
+        backstory: str,
+        llm: Optional[Union[str, Any]] = "gpt-4o-mini",
+        tools: Optional[List[Any]] = None,
+        function_calling_llm: Optional[Any] = None,
+        max_iter: int = 20,
+        max_rpm: Optional[int] = None,
+        max_execution_time: Optional[int] = None,
+        memory: bool = True,
+        verbose: bool = False,
+        allow_delegation: bool = False,
+        step_callback: Optional[Any] = None,
+        cache: bool = True,
+        system_template: Optional[str] = None,
+        prompt_template: Optional[str] = None,
+        response_template: Optional[str] = None,
+        allow_code_execution: Optional[bool] = False,
+        max_retry_limit: int = 2,
+        respect_context_window: bool = True,
+        code_execution_mode: Literal["safe", "unsafe"] = "safe",
+        embedder_config: Optional[Dict[str, Any]] = None,
+        knowledge_sources: Optional[List[Any]] = None,
+        use_system_prompt: Optional[bool] = True,
+        markdown: bool = True,
+        self_reflect: bool = True,
+        max_reflection_iter: int = 3
+    ):
+        self.name = name
+        self.role = role
+        self.goal = goal
+        self.backstory = backstory
+        self.llm = llm
+        self.tools = tools if tools else []
+        self.function_calling_llm = function_calling_llm
+        self.max_iter = max_iter
+        self.max_rpm = max_rpm
+        self.max_execution_time = max_execution_time
+        self.memory = memory
+        self.verbose = verbose
+        self.allow_delegation = allow_delegation
+        self.step_callback = step_callback
+        self.cache = cache
+        self.system_template = system_template
+        self.prompt_template = prompt_template
+        self.response_template = response_template
+        self.allow_code_execution = allow_code_execution
+        self.max_retry_limit = max_retry_limit
+        self.respect_context_window = respect_context_window
+        self.code_execution_mode = code_execution_mode
+        self.embedder_config = embedder_config
+        self.knowledge_sources = knowledge_sources
+        self.use_system_prompt = use_system_prompt
+        self.chat_history = []
+        self.markdown = markdown
+        self.self_reflect = self_reflect
+        self.max_reflection_iter = max_reflection_iter
+
+    def execute_tool(self, function_name, arguments):
+        logging.debug(f"{self.name} executing tool {function_name} with arguments: {arguments}")
+        if function_name == "get_weather":
+            location = arguments.get("location", "Unknown Location")
+            return {"temperature": "25C", "condition": "Sunny", "location": location}
+        elif function_name == "search_tool":
+            query = arguments.get("query", "AI trends in 2024")
+            return {"results": [
+                {"title": "AI advancements in 2024", "link": "url1", "summary": "Lots of advancements"},
+                {"title": "New trends in AI", "link": "url2", "summary": "New trends being found"}
+            ]}
+        else:
+            return f"Tool '{function_name}' is not recognized"
+
+    def clear_history(self):
+        self.chat_history = []
+
+    def __str__(self):
+        return f"Agent(name='{self.name}', role='{self.role}', goal='{self.goal}')"
+
+    def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True):
+        console = Console()
+        start_time = time.time()
+        logging.debug(f"{self.name} sending messages to LLM: {messages}")
+
+        formatted_tools = []
+        if tools:
+            for tool in tools:
+                if isinstance(tool, dict):
+                    formatted_tools.append(tool)
+                elif hasattr(tool, "to_openai_tool"):
+                    formatted_tools.append(tool.to_openai_tool())
+                elif isinstance(tool, str):
+                    formatted_tools.append({
+                        "type": "function",
+                        "function": {
+                            "name": tool,
+                            "description": f"This is a tool called {tool}",
+                            "parameters": {
+                                "type": "object",
+                                "properties": {},
+                            },
+                        }
+                    })
+                else:
+                    display_error(f"Warning: Tool {tool} not recognized")
+
+        try:
+            initial_response = client.chat.completions.create(
+                model=self.llm,
+                messages=messages,
+                temperature=temperature,
+                tools=formatted_tools if formatted_tools else None,
+                stream=False
+            )
+
+            tool_calls = getattr(initial_response.choices[0].message, 'tool_calls', None)
+
+            if tool_calls:
+                messages.append({
+                    "role": "assistant",
+                    "content": initial_response.choices[0].message.content,
+                    "tool_calls": tool_calls
+                })
+
+                for tool_call in tool_calls:
+                    function_name = tool_call.function.name
+                    arguments = json.loads(tool_call.function.arguments)
+
+                    if self.verbose:
+                        display_tool_call(f"Agent {self.name} is calling function '{function_name}' with arguments: {arguments}")
+
+                    tool_result = self.execute_tool(function_name, arguments)
+                    results_str = json.dumps(tool_result) if tool_result else "Function returned an empty output"
+
+                    if self.verbose:
+                        display_tool_call(f"Function '{function_name}' returned: {results_str}")
+
+                    messages.append({
+                        "role": "tool",
+                        "tool_call_id": tool_call.id,
+                        "content": results_str
+                    })
+
+            if stream:
+                response_stream = client.chat.completions.create(
+                    model=self.llm,
+                    messages=messages,
+                    temperature=temperature,
+                    stream=True
+                )
+                full_response_text = ""
+                with Live(display_generating("", start_time), refresh_per_second=4) as live:
+                    for chunk in response_stream:
+                        if chunk.choices[0].delta.content:
+                            full_response_text += chunk.choices[0].delta.content
+                            live.update(display_generating(full_response_text, start_time))
+
+                final_response = client.chat.completions.create(
+                    model=self.llm,
+                    messages=messages,
+                    temperature=temperature,
+                    stream=False
+                )
+                return final_response
+            else:
+                if tool_calls:
+                    final_response = client.chat.completions.create(
+                        model=self.llm,
+                        messages=messages,
+                        temperature=temperature,
+                        stream=False
+                    )
+                    return final_response
+                else:
+                    return initial_response
+
+        except Exception as e:
+            display_error(f"Error in chat completion: {e}")
+            return None
+
+    def chat(self, prompt, temperature=0.2, tools=None, output_json=None):
+        if self.use_system_prompt:
+            system_prompt = f"""{self.backstory}\n
+Your Role: {self.role}\n
+Your Goal: {self.goal}
+            """
+        else:
+            system_prompt = None
+
+        messages = []
+        if system_prompt:
+            messages.append({"role": "system", "content": system_prompt})
+        messages.extend(self.chat_history)
+        messages.append({"role": "user", "content": prompt})
+
+        final_response_text = None
+        reflection_count = 0
+        start_time = time.time()
+
+        while True:
+            try:
+                if self.verbose:
+                    display_instruction(f"Agent {self.name} is processing prompt: {prompt}")
+
+                formatted_tools = []
+                if tools:
+                    for tool in tools:
+                        if isinstance(tool, dict):
+                            formatted_tools.append(tool)
+                        elif hasattr(tool, "to_openai_tool"):
+                            formatted_tools.append(tool.to_openai_tool())
+                        elif isinstance(tool, str):
+                            formatted_tools.append({
+                                "type": "function",
+                                "function": {
+                                    "name": tool,
+                                    "description": f"This is a tool called {tool}",
+                                    "parameters": {
+                                        "type": "object",
+                                        "properties": {},
+                                    },
+                                }
+                            })
+                        else:
+                            display_error(f"Warning: Tool {tool} not recognized")
+
+                response = self._chat_completion(messages, temperature=temperature, tools=formatted_tools if formatted_tools else None)
+                if not response:
+                    return None
+
+                tool_calls = getattr(response.choices[0].message, 'tool_calls', None)
+
+                if tool_calls:
+                    messages.append({
+                        "role": "assistant",
+                        "content": response.choices[0].message.content,
+                        "tool_calls": tool_calls
+                    })
+
+                    for tool_call in tool_calls:
+                        function_name = tool_call.function.name
+                        arguments = json.loads(tool_call.function.arguments)
+
+                        if self.verbose:
+                            display_tool_call(f"Agent {self.name} is calling function '{function_name}' with arguments: {arguments}")
+
+                        tool_result = self.execute_tool(function_name, arguments)
+
+                        if tool_result:
+                            if self.verbose:
+                                display_tool_call(f"Function '{function_name}' returned: {tool_result}")
+                            messages.append({
+                                "role": "tool",
+                                "tool_call_id": tool_call.id,
+                                "content": json.dumps(tool_result)
+                            })
+                        else:
+                            messages.append({
+                                "role": "tool",
+                                "tool_call_id": tool_call.id,
+                                "content": "Function returned an empty output"
+                            })
+
+                    response = self._chat_completion(messages, temperature=temperature)
+                    if not response:
+                        return None
+                    response_text = response.choices[0].message.content.strip()
+                else:
+                    response_text = response.choices[0].message.content.strip()
+
+                if not self.self_reflect:
+                    self.chat_history.append({"role": "user", "content": prompt})
+                    self.chat_history.append({"role": "assistant", "content": response_text})
+                    if self.verbose:
+                        logging.info(f"Agent {self.name} final response: {response_text}")
+                    display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time)
+                    return response_text
+
+                reflection_prompt = f"""
+Reflect on your previous response: '{response_text}'.
+Identify any flaws, improvements, or actions.
+Provide a "satisfactory" status ('yes' or 'no').
+Output MUST be JSON with 'reflection' and 'satisfactory'.
+                """
+                logging.debug(f"{self.name} reflection attempt {reflection_count+1}, sending prompt: {reflection_prompt}")
+                messages.append({"role": "user", "content": reflection_prompt})
+
+                try:
+                    reflection_response = client.beta.chat.completions.parse(
+                        model=self.llm,
+                        messages=messages,
+                        temperature=temperature,
+                        response_format=ReflectionOutput
+                    )
+
+                    reflection_output = reflection_response.choices[0].message.parsed
+
+                    if self.verbose:
+                        display_self_reflection(f"Agent {self.name} self reflection: reflection='{reflection_output.reflection}' satisfactory='{reflection_output.satisfactory}'")
+
+                    messages.append({"role": "assistant", "content": f"Self Reflection: {reflection_output.reflection} Satisfactory?: {reflection_output.satisfactory}"})
+
+                    if reflection_output.satisfactory == "yes":
+                        if self.verbose:
+                            display_self_reflection("Agent marked the response as satisfactory")
+                        self.chat_history.append({"role": "assistant", "content": response_text})
+                        display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time)
+                        return response_text
+
+                    logging.debug(f"{self.name} reflection not satisfactory, requesting regeneration.")
+                    messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
+                    response = self._chat_completion(messages, temperature=temperature, tools=None, stream=True)
+                    response_text = response.choices[0].message.content.strip()
+                except Exception as e:
+                    display_error(f"Error in parsing self-reflection json {e}. Retrying")
+                    logging.error("Reflection parsing failed.", exc_info=True)
+                    messages.append({"role": "assistant", "content": "Self Reflection failed."})
+
+                reflection_count += 1  # NOTE: the method returns below, so max_reflection_iter is never enforced
+
+                self.chat_history.append({"role": "user", "content": prompt})
+                self.chat_history.append({"role": "assistant", "content": response_text})
+
+                if self.verbose:
+                    logging.info(f"Agent {self.name} final response: {response_text}")
+                display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time)
+                return response_text
+            except Exception as e:
+                display_error(f"Error in chat: {e}")
+                return None
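
For a quick check of the vendored `Agent` class: `execute_tool` only stubs two tools, `get_weather` and `search_tool`, and plain string tool names are auto-wrapped into a parameterless OpenAI function schema. A hedged usage sketch (assumes `OPENAI_API_KEY` is set, since the client is created when `praisonaiagents.main` is imported):

```python
from praisonaiagents import Agent

weather_agent = Agent(
    name="WeatherBot",
    role="Weather reporter",
    goal="Report the current weather",
    backstory="You call tools and summarise their output",
    verbose=True,
    self_reflect=False,  # skip the reflection loop for a fast smoke test
)

# "get_weather" resolves to the built-in stub in execute_tool(), which
# returns {"temperature": "25C", "condition": "Sunny", ...}.
reply = weather_agent.chat("What is the weather in Paris?", tools=["get_weather"])
print(reply)
```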

--- /dev/null
+++ praisonaiagents/build/lib/praisonaiagents/agents/__init__.py
@@ -0,0 +1,4 @@
+"""Agents module for managing multiple AI agents"""
+from .agents import Agents
+
+__all__ = ['Agents']

--- /dev/null
+++ praisonaiagents/build/lib/praisonaiagents/agents/agents.py
@@ -0,0 +1,318 @@
+import os
+import time
+import json
+import logging
+from typing import Any, Dict, Optional
+from pydantic import BaseModel
+from rich.text import Text
+from rich.panel import Panel
+from rich.console import Console
+from ..main import display_error, TaskOutput, error_logs, client
+from ..agent.agent import Agent
+from ..task.task import Task
+
+class Agents:
+    def __init__(self, agents, tasks, verbose=0, completion_checker=None, max_retries=5, process="sequential", manager_llm=None):
+        self.agents = agents
+        self.tasks = {}
+        if max_retries < 3:
+            max_retries = 3
+        self.completion_checker = completion_checker if completion_checker else self.default_completion_checker
+        self.task_id_counter = 0
+        self.verbose = verbose
+        self.max_retries = max_retries
+        self.process = process
+        self.manager_llm = manager_llm
+        for task in tasks:
+            self.add_task(task)
+            task.status = "not started"
+
+    def add_task(self, task):
+        task_id = self.task_id_counter
+        task.id = task_id
+        self.tasks[task_id] = task
+        self.task_id_counter += 1
+        return task_id
+
+    def clean_json_output(self, output: str) -> str:
+        cleaned = output.strip()
+        if cleaned.startswith("```json"):
+            cleaned = cleaned[len("```json"):].strip()
+        if cleaned.startswith("```"):
+            cleaned = cleaned[len("```"):].strip()
+        if cleaned.endswith("```"):
+            cleaned = cleaned[:-3].strip()
+        return cleaned
+
+    def default_completion_checker(self, task, agent_output):
+        if task.output_json and task.result and task.result.json_dict:
+            return True
+        if task.output_pydantic and task.result and task.result.pydantic:
+            return True
+        return len(agent_output.strip()) > 0
+
+    def execute_task(self, task_id):
+        if task_id not in self.tasks:
+            display_error(f"Error: Task with ID {task_id} does not exist")
+            return
+        task = self.tasks[task_id]
+        if task.status == "not started":
+            task.status = "in progress"
+
+        executor_agent = task.agent
+
+        task_prompt = f"""
+You need to do the following task: {task.description}.
+Expected Output: {task.expected_output}.
+        """
+        if task.context:
+            context_results = ""
+            for context_task in task.context:
+                if context_task.result:
+                    context_results += f"Result of previous task {context_task.name if context_task.name else context_task.description}: {context_task.result.raw}\n"
+                else:
+                    context_results += f"Previous task {context_task.name if context_task.name else context_task.description} had no result.\n"
+            task_prompt += f"""
+Here are the results of previous tasks that might be useful:\n
+{context_results}
+            """
+        task_prompt += "Please provide only the final result of your work. Do not add any conversation or extra explanation."
+
+        if self.verbose >= 2:
+            logging.info(f"Executing task {task_id}: {task.description} using {executor_agent.name}")
+        logging.debug(f"Starting execution of task {task_id} with prompt:\n{task_prompt}")
+        agent_output = executor_agent.chat(task_prompt, tools=task.tools)
+        if agent_output:
+            task_output = TaskOutput(
+                description=task.description,
+                summary=task.description[:10],
+                raw=agent_output,
+                agent=executor_agent.name,
+                output_format="RAW"
+            )
+
+            if task.output_json:
+                cleaned = self.clean_json_output(agent_output)
+                try:
+                    parsed = json.loads(cleaned)
+                    task_output.json_dict = parsed
+                    task_output.output_format = "JSON"
+                except Exception:  # fix: was a bare except
+                    logging.warning(f"Warning: Could not parse output of task {task_id} as JSON")
+                    logging.debug(f"Output that failed JSON parsing: {agent_output}")
+
+            if task.output_pydantic:
+                cleaned = self.clean_json_output(agent_output)
+                try:
+                    parsed = json.loads(cleaned)
+                    pyd_obj = task.output_pydantic(**parsed)
+                    task_output.pydantic = pyd_obj
+                    task_output.output_format = "Pydantic"
+                except Exception:  # fix: was a bare except
+                    logging.warning(f"Warning: Could not parse output of task {task_id} as Pydantic Model")
+                    logging.debug(f"Output that failed Pydantic parsing: {agent_output}")
+
+            task.result = task_output
+            return task_output
+        else:
+            task.status = "failed"
+            return None
+
+    def save_output_to_file(self, task, task_output):
+        if task.output_file:
+            try:
+                if task.create_directory:
+                    os.makedirs(os.path.dirname(task.output_file), exist_ok=True)
+                with open(task.output_file, "w") as f:
+                    f.write(str(task_output))
+                if self.verbose >= 1:
+                    logging.info(f"Task output saved to {task.output_file}")
+            except Exception as e:
+                display_error(f"Error saving task output to file: {e}")
+
+    def run_task(self, task_id):
+        if task_id not in self.tasks:
+            display_error(f"Error: Task with ID {task_id} does not exist")
+            return
+        task = self.tasks[task_id]
+        if task.status == "completed":
+            logging.info(f"Task with ID {task_id} is already completed")
+            return
+
+        retries = 0
+        while task.status != "completed" and retries < self.max_retries:
+            logging.debug(f"Attempt {retries+1} for task {task_id}")
+            if task.status in ["not started", "in progress"]:
+                task_output = self.execute_task(task_id)
+                if task_output and self.completion_checker(task, task_output.raw):
+                    task.status = "completed"
+                    if task.callback:
+                        task.callback(task_output)
+                    self.save_output_to_file(task, task_output)
+                    if self.verbose >= 1:
+                        logging.info(f"Task {task_id} completed successfully.")
+                else:
+                    task.status = "in progress"
+                    if self.verbose >= 1:
+                        logging.info(f"Task {task_id} not completed, retrying")
+                    time.sleep(1)
+                retries += 1
+            else:
+                if task.status == "failed":
+                    logging.info("Task is failed, resetting to in-progress for another try...")
+                    task.status = "in progress"
+                else:
+                    logging.info("Invalid Task status")
+                    break
+
+        if retries == self.max_retries and task.status != "completed":
+            logging.info(f"Task {task_id} failed after {self.max_retries} retries.")
+
+    def run_all_tasks(self):
+        if self.process == "sequential":
+            for task_id in self.tasks:
+                if self.tasks[task_id].status != "completed":
+                    self.run_task(task_id)
+        elif self.process == "hierarchical":
+            logging.debug(f"Starting hierarchical task execution with {len(self.tasks)} tasks")
+            manager_agent = Agent(
+                name="Manager",
+                role="Project manager",
+                goal="Manage the entire flow of tasks and delegate them to the right agent",
+                backstory="Expert project manager to coordinate tasks among agents",
+                llm=self.manager_llm,
+                verbose=self.verbose,
+                markdown=True,
+                self_reflect=False
+            )
+
+            class ManagerInstructions(BaseModel):
+                task_id: int
+                agent_name: str
+                action: str
+
+            manager_task = Task(
+                name="manager_task",
+                description="Decide the order of tasks and which agent executes them",
+                expected_output="All tasks completed successfully",
+                agent=manager_agent
+            )
+            manager_task_id = self.add_task(manager_task)
+            logging.info(f"Created manager task with ID {manager_task_id}")
+
+            completed_count = 0
+            total_tasks = len(self.tasks) - 1
+            logging.info(f"Need to complete {total_tasks} tasks (excluding manager task)")
+
+            while completed_count < total_tasks:
+                tasks_summary = []
+                for tid, tk in self.tasks.items():
+                    if tk.name == "manager_task":
+                        continue
+                    task_info = {
+                        "task_id": tid,
+                        "name": tk.name,
+                        "description": tk.description,
+                        "status": tk.status if tk.status else "not started",
+                        "agent": tk.agent.name if tk.agent else "No agent"
+                    }
+                    tasks_summary.append(task_info)
+                    logging.info(f"Task {tid} status: {task_info}")
+
+                manager_prompt = f"""
+Here is the current status of all tasks except yours (manager_task):
+{tasks_summary}
+
+Provide a JSON with the structure:
+{{
+    "task_id": <int>,
+    "agent_name": "<string>",
+    "action": "<execute or stop>"
+}}
+                """
+
+                try:
+                    logging.info("Requesting manager instructions...")
+                    manager_response = client.beta.chat.completions.parse(
+                        model=self.manager_llm,
+                        messages=[
+                            {"role": "system", "content": manager_task.description},
+                            {"role": "user", "content": manager_prompt}
+                        ],
+                        temperature=0.7,
+                        response_format=ManagerInstructions
+                    )
+                    parsed_instructions = manager_response.choices[0].message.parsed
+                    logging.info(f"Manager instructions: {parsed_instructions}")
+                except Exception as e:
+                    display_error(f"Manager parse error: {e}")
+                    logging.error(f"Manager parse error: {str(e)}", exc_info=True)
+                    break
+
+                selected_task_id = parsed_instructions.task_id
+                selected_agent_name = parsed_instructions.agent_name
+                action = parsed_instructions.action
+
+                logging.info(f"Manager selected task_id={selected_task_id}, agent={selected_agent_name}, action={action}")
+
+                if action.lower() == "stop":
+                    logging.info("Manager decided to stop task execution")
+                    break
+
+                if selected_task_id not in self.tasks:
+                    error_msg = f"Manager selected invalid task id {selected_task_id}"
+                    display_error(error_msg)
+                    logging.error(error_msg)
+                    break
+
+                original_agent = self.tasks[selected_task_id].agent.name if self.tasks[selected_task_id].agent else "None"
+                for a in self.agents:
+                    if a.name == selected_agent_name:
+                        self.tasks[selected_task_id].agent = a
+                        logging.info(f"Changed agent for task {selected_task_id} from {original_agent} to {selected_agent_name}")
+                        break
+
+                if self.tasks[selected_task_id].status != "completed":
+                    logging.info(f"Starting execution of task {selected_task_id}")
+                    self.run_task(selected_task_id)
+                    logging.info(f"Finished execution of task {selected_task_id}, status: {self.tasks[selected_task_id].status}")
+
+                if self.tasks[selected_task_id].status == "completed":
+                    completed_count += 1
+                    logging.info(f"Task {selected_task_id} completed. Total completed: {completed_count}/{total_tasks}")
+
+            self.tasks[manager_task.id].status = "completed"
+            if self.verbose >= 1:
+                logging.info("All tasks completed under manager supervision.")
+            logging.info("Hierarchical task execution finished")
+
+    def get_task_status(self, task_id):
+        if task_id in self.tasks:
+            return self.tasks[task_id].status
+        return None
+
+    def get_all_tasks_status(self):
+        return {task_id: self.tasks[task_id].status for task_id in self.tasks}
+
+    def get_task_result(self, task_id):
+        if task_id in self.tasks:
+            return self.tasks[task_id].result
+        return None
+
+    def get_task_details(self, task_id):
+        if task_id in self.tasks:
+            return str(self.tasks[task_id])
+        return None
+
+    def get_agent_details(self, agent_name):
+        agent = [task.agent for task in self.tasks.values() if task.agent and task.agent.name == agent_name]
+        if agent:
+            return str(agent[0])
+        return None
+
+    def start(self):
+        self.run_all_tasks()
+        return {
+            "task_status": self.get_all_tasks_status(),
+            "task_results": {task_id: self.get_task_result(task_id) for task_id in self.tasks}
+        }
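
End to end, `start()` simply calls `run_all_tasks()` and returns the status and result maps. A sketch of a two-task sequential run where the second task reads the first task's result through `context` (illustrative values; assumes `OPENAI_API_KEY`):

```python
from praisonaiagents import Agent, PraisonAIAgents, Task

researcher = Agent(name="Researcher", role="Researcher", goal="Collect facts",
                   backstory="Thorough researcher", self_reflect=False)
writer = Agent(name="Writer", role="Writer", goal="Write summaries",
               backstory="Concise technical writer", self_reflect=False)

research = Task(name="research", description="List three AI trends of 2024",
                expected_output="Three bullet points", agent=researcher)
summary = Task(name="summary", description="Turn the trends into one paragraph",
               expected_output="One paragraph", agent=writer,
               context=[research])  # injects research's TaskOutput.raw into the prompt

crew = PraisonAIAgents(agents=[researcher, writer], tasks=[research, summary],
                       process="sequential", verbose=1)  # or process="hierarchical"
result = crew.start()
print(result["task_status"])          # e.g. {0: 'completed', 1: 'completed'}
print(result["task_results"][1].raw)  # the writer's raw output
```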

--- /dev/null
+++ praisonaiagents/build/lib/praisonaiagents/main.py
@@ -0,0 +1,112 @@
+import os
+import time
+import json
+import logging
+from typing import List, Optional, Dict, Any, Union, Literal, Type
+from openai import OpenAI
+from pydantic import BaseModel
+from rich import print
+from rich.console import Console
+from rich.panel import Panel
+from rich.text import Text
+from rich.markdown import Markdown
+from rich.logging import RichHandler
+from rich.live import Live
+
+LOGLEVEL = os.environ.get('LOGLEVEL', 'INFO').upper()
+
+logging.basicConfig(
+    level=getattr(logging, LOGLEVEL, logging.INFO),
+    format="%(asctime)s %(filename)s:%(lineno)d %(levelname)s %(message)s",
+    datefmt="[%X]",
+    handlers=[RichHandler(rich_tracebacks=True)]
+)
+
+# Global list to store error logs
+error_logs = []
+
+def display_interaction(message: str, response: str, markdown: bool = True, generation_time: Optional[float] = None):
+    console = Console()
+    if generation_time is not None:
+        console.print(Text(f"Response generated in {generation_time:.1f}s", style="dim"))
+    else:
+        console.print(Text("Response Generation Complete", style="dim"))
+
+    if markdown:
+        console.print(Panel.fit(Markdown(message), title="Message", border_style="cyan"))
+        console.print(Panel.fit(Markdown(response), title="Response", border_style="cyan"))
+    else:
+        console.print(Panel.fit(Text(message, style="bold green"), title="Message", border_style="cyan"))
+        console.print(Panel.fit(Text(response, style="bold white"), title="Response", border_style="cyan"))
+
+def display_self_reflection(message: str):
+    console = Console()
+    console.print(Panel.fit(Text(message, style="bold yellow"), title="Self Reflection", border_style="magenta"))
+
+def display_instruction(message: str):
+    console = Console()
+    console.print(Panel.fit(Text(message, style="bold blue"), title="Instruction", border_style="cyan"))
+
+def display_tool_call(message: str):
+    console = Console()
+    console.print(Panel.fit(Text(message, style="bold cyan"), title="Tool Call", border_style="green"))
+
+def display_error(message: str):
+    console = Console()
+    console.print(Panel.fit(Text(message, style="bold red"), title="Error", border_style="red"))
+    # Store errors
+    error_logs.append(message)
+
+def display_generating(content: str = "", start_time: Optional[float] = None):
+    elapsed_str = ""
+    if start_time is not None:
+        elapsed = time.time() - start_time
+        elapsed_str = f" {elapsed:.1f}s"
+    return Panel(Markdown(content), title=f"Generating...{elapsed_str}", border_style="green")
+
+def clean_triple_backticks(text: str) -> str:
+    """Remove triple backticks and surrounding json fences from a string."""
+    cleaned = text.strip()
+    if cleaned.startswith("```json"):
+        cleaned = cleaned[len("```json"):].strip()
+    if cleaned.startswith("```"):
+        cleaned = cleaned[len("```"):].strip()
+    if cleaned.endswith("```"):
+        cleaned = cleaned[:-3].strip()
+    return cleaned
+
+class ReflectionOutput(BaseModel):
+    reflection: str
+    satisfactory: Literal["yes", "no"]
+
+client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
+
+class TaskOutput(BaseModel):
+    description: str
+    summary: Optional[str] = None
+    raw: str
+    pydantic: Optional[BaseModel] = None
+    json_dict: Optional[Dict[str, Any]] = None
+    agent: str
+    output_format: Literal["RAW", "JSON", "Pydantic"] = "RAW"
+
+    def json(self) -> Optional[str]:
+        if self.output_format == "JSON" and self.json_dict:
+            return json.dumps(self.json_dict)
+        return None
+
+    def to_dict(self) -> dict:
+        output_dict = {}
+        if self.json_dict:
+            output_dict.update(self.json_dict)
+        if self.pydantic:
+            output_dict.update(self.pydantic.model_dump())
+        return output_dict
+
+    def __str__(self):
+        if self.pydantic:
+            return str(self.pydantic)
+        elif self.json_dict:
+            return json.dumps(self.json_dict)
+        else:
+            return self.raw
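
`clean_triple_backticks` and the `TaskOutput` helpers can be exercised without any API call; a small sketch (the fence string is built at runtime so the example itself stays fence-free, and importing the module still assumes `OPENAI_API_KEY` is set because the OpenAI client is created at import time):

```python
from praisonaiagents.main import TaskOutput, clean_triple_backticks

fence = "`" * 3
fenced = f'{fence}json\n{{"temperature": "25C"}}\n{fence}'
print(clean_triple_backticks(fenced))  # {"temperature": "25C"}

out = TaskOutput(description="demo", raw='{"temperature": "25C"}',
                 json_dict={"temperature": "25C"}, agent="WeatherBot",
                 output_format="JSON")
print(out.json())     # '{"temperature": "25C"}'
print(str(out))       # json_dict takes precedence over raw
print(out.to_dict())  # {'temperature': '25C'}
```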

--- /dev/null
+++ praisonaiagents/build/lib/praisonaiagents/task/__init__.py
@@ -0,0 +1,4 @@
+"""Task module for AI agent tasks"""
+from .task import Task
+
+__all__ = ['Task']

--- /dev/null
+++ praisonaiagents/build/lib/praisonaiagents/task/task.py
@@ -0,0 +1,48 @@
+import logging
+from typing import List, Optional, Dict, Any, Type
+from pydantic import BaseModel
+from ..main import TaskOutput
+from ..agent.agent import Agent
+
+class Task:
+    def __init__(
+        self,
+        description: str,
+        expected_output: str,
+        agent: Optional[Agent] = None,
+        name: Optional[str] = None,
+        tools: Optional[List[Any]] = None,
+        context: Optional[List["Task"]] = None,
+        async_execution: Optional[bool] = False,
+        config: Optional[Dict[str, Any]] = None,
+        output_file: Optional[str] = None,
+        output_json: Optional[Type[BaseModel]] = None,
+        output_pydantic: Optional[Type[BaseModel]] = None,
+        callback: Optional[Any] = None,
+        status: str = "not started",
+        result: Optional[TaskOutput] = None,
+        create_directory: Optional[bool] = False,
+        id: Optional[int] = None
+    ):
+        self.description = description
+        self.expected_output = expected_output
+        self.name = name
+        self.agent = agent
+        self.tools = tools if tools else []
+        self.context = context if context else []
+        self.async_execution = async_execution
+        self.config = config if config else {}
+        self.output_file = output_file
+        self.output_json = output_json
+        self.output_pydantic = output_pydantic
+        self.callback = callback
+        self.status = status
+        self.result = result
+        self.create_directory = create_directory
+        self.id = id
+
+        if self.output_json and self.output_pydantic:
+            raise ValueError("Only one output type can be defined")
+
+    def __str__(self):
+        return f"Task(name='{self.name if self.name else 'None'}', description='{self.description}', agent='{self.agent.name if self.agent else 'None'}', status='{self.status}')"

--- praisonaiagents-0.0.3.dist-info/METADATA
+++ praisonaiagents-0.0.5.dist-info/METADATA
@@ -1,7 +1,7 @@
 Metadata-Version: 2.1
 Name: praisonaiagents
-Version: 0.0.3
-Summary: Hierarchical AI agents for task execution
+Version: 0.0.5
+Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Dist: pydantic
 Requires-Dist: rich

--- /dev/null
+++ praisonaiagents-0.0.5.dist-info/RECORD
@@ -0,0 +1,20 @@
+praisonaiagents/__init__.py,sha256=gI8vEabBTRPsE_E8GA5sBMi4sTtJI-YokPrH2Nor-k0,741
+praisonaiagents/main.py,sha256=zDhN5KKtKbfruolDNxlyJkcFlkSt4KQkQTDRfQVAhxc,3960
+praisonaiagents/agent/__init__.py,sha256=sKO8wGEXvtCrvV1e834r1Okv0XAqAxqZCqz6hKLiTvA,79
+praisonaiagents/agent/agent.py,sha256=CCCjv-qtr6hSB-BG7C8l3z-pXQpnTkX9bW6me36YiaU,15512
+praisonaiagents/agents/__init__.py,sha256=7RDeQNSqZg5uBjD4M_0p_F6YgfWuDuxPFydPU50kDYc,120
+praisonaiagents/agents/agents.py,sha256=BoonwduUA1S13JUwr6CaPR_t3_ZfCtNJYIr7ZeXPu-8,13543
+praisonaiagents/build/lib/praisonaiagents/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
+praisonaiagents/build/lib/praisonaiagents/main.py,sha256=zDhN5KKtKbfruolDNxlyJkcFlkSt4KQkQTDRfQVAhxc,3960
+praisonaiagents/build/lib/praisonaiagents/agent/__init__.py,sha256=sKO8wGEXvtCrvV1e834r1Okv0XAqAxqZCqz6hKLiTvA,79
+praisonaiagents/build/lib/praisonaiagents/agent/agent.py,sha256=PwbeW6v4Ldcl10JQr9_7TBfg4_FskQh-mGoFUdGxg8w,15483
+praisonaiagents/build/lib/praisonaiagents/agents/__init__.py,sha256=cgCLFLFcLp9SizmFSHUkH5aX-1seAAsRtQbtIHBBso4,101
+praisonaiagents/build/lib/praisonaiagents/agents/agents.py,sha256=P2FAtlfD3kPib5a1oLVYanxlU6e4-GhBMQ0YDY5MHY4,13473
+praisonaiagents/build/lib/praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
+praisonaiagents/build/lib/praisonaiagents/task/task.py,sha256=4Y1qX8OeEFcid2yhAiPYylvHpuDmWORsyNL16_BiVvI,1831
+praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
+praisonaiagents/task/task.py,sha256=4Y1qX8OeEFcid2yhAiPYylvHpuDmWORsyNL16_BiVvI,1831
+praisonaiagents-0.0.5.dist-info/METADATA,sha256=MNGv3othNwsXvB52y2RHN30tB0MFE8aHntbGzLLAaco,232
+praisonaiagents-0.0.5.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+praisonaiagents-0.0.5.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.5.dist-info/RECORD,,

--- praisonaiagents-0.0.3.dist-info/RECORD
+++ /dev/null
@@ -1,12 +0,0 @@
-praisonaiagents/__init__.py,sha256=479EdtuXm27prpz83bfm6DCoUfx-W0u-LNIphAlqXDc,723
-praisonaiagents/main.py,sha256=zDhN5KKtKbfruolDNxlyJkcFlkSt4KQkQTDRfQVAhxc,3960
-praisonaiagents/agent/__init__.py,sha256=sKO8wGEXvtCrvV1e834r1Okv0XAqAxqZCqz6hKLiTvA,79
-praisonaiagents/agent/agent.py,sha256=CCCjv-qtr6hSB-BG7C8l3z-pXQpnTkX9bW6me36YiaU,15512
-praisonaiagents/agents/__init__.py,sha256=cgCLFLFcLp9SizmFSHUkH5aX-1seAAsRtQbtIHBBso4,101
-praisonaiagents/agents/agents.py,sha256=P2FAtlfD3kPib5a1oLVYanxlU6e4-GhBMQ0YDY5MHY4,13473
-praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
-praisonaiagents/task/task.py,sha256=4Y1qX8OeEFcid2yhAiPYylvHpuDmWORsyNL16_BiVvI,1831
-praisonaiagents-0.0.3.dist-info/METADATA,sha256=_1UEXROlBjHRlL7q3kSqPrhNiutjA2YI63avoer7sgQ,199
-praisonaiagents-0.0.3.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
-praisonaiagents-0.0.3.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.3.dist-info/RECORD,,