praisonaiagents-0.0.15-py3-none-any.whl → praisonaiagents-0.0.17-py3-none-any.whl

@@ -1,9 +1,12 @@
- import logging
- import json
+ import os
  import time
+ import json
+ import logging
+ import asyncio
  from typing import List, Optional, Any, Dict, Union, Literal
  from rich.console import Console
  from rich.live import Live
+ from openai import AsyncOpenAI
  from ..main import (
  display_error,
  display_tool_call,
@@ -192,6 +195,12 @@ class Agent:
  self.min_reflect = min_reflect
  self.reflect_llm = reflect_llm
  self.console = Console() # Create a single console instance for the agent
+
+ # Initialize system prompt
+ self.system_prompt = f"""{self.backstory}\n
+ Your Role: {self.role}\n
+ Your Goal: {self.goal}
+ """

  def execute_tool(self, function_name, arguments):
  """
@@ -536,4 +545,143 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  cleaned = cleaned[len("```"):].strip()
  if cleaned.endswith("```"):
  cleaned = cleaned[:-3].strip()
- return cleaned
+ return cleaned
+
+ async def achat(self, prompt, temperature=0.2, tools=None, output_json=None):
+ """Async version of chat method"""
+ try:
+ # Build system prompt
+ system_prompt = self.system_prompt
+ if output_json:
+ system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {output_json.schema_json()}"
+
+ # Build messages
+ if isinstance(prompt, str):
+ messages = [
+ {"role": "system", "content": system_prompt},
+ {"role": "user", "content": prompt + ("\nReturn ONLY a valid JSON object. No other text or explanation." if output_json else "")}
+ ]
+ else:
+ # For multimodal prompts
+ messages = [
+ {"role": "system", "content": system_prompt},
+ {"role": "user", "content": prompt}
+ ]
+ if output_json:
+ # Add JSON instruction to text content
+ for item in messages[-1]["content"]:
+ if item["type"] == "text":
+ item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
+ break
+
+ # Format tools if provided
+ formatted_tools = []
+ if tools:
+ for tool in tools:
+ if isinstance(tool, str):
+ tool_def = self._generate_tool_definition(tool)
+ if tool_def:
+ formatted_tools.append(tool_def)
+ elif isinstance(tool, dict):
+ formatted_tools.append(tool)
+ elif hasattr(tool, "to_openai_tool"):
+ formatted_tools.append(tool.to_openai_tool())
+ elif callable(tool):
+ formatted_tools.append(self._generate_tool_definition(tool.__name__))
+
+ # Create async OpenAI client
+ async_client = AsyncOpenAI()
+
+ # Make the API call based on the type of request
+ if tools:
+ response = await async_client.chat.completions.create(
+ model=self.llm,
+ messages=messages,
+ temperature=temperature,
+ tools=formatted_tools
+ )
+ return await self._achat_completion(response, tools)
+ elif output_json:
+ response = await async_client.chat.completions.create(
+ model=self.llm,
+ messages=messages,
+ temperature=temperature,
+ response_format={"type": "json_object"}
+ )
+ result = response.choices[0].message.content
+ # Clean and parse the JSON response
+ cleaned_json = self.clean_json_output(result)
+ try:
+ parsed = json.loads(cleaned_json)
+ return output_json(**parsed)
+ except Exception as e:
+ display_error(f"Error parsing JSON response: {e}")
+ return None
+ else:
+ response = await async_client.chat.completions.create(
+ model=self.llm,
+ messages=messages,
+ temperature=temperature
+ )
+ return response.choices[0].message.content
+ except Exception as e:
+ display_error(f"Error in chat completion: {e}")
+ return None
+
+ async def _achat_completion(self, response, tools):
+ """Async version of _chat_completion method"""
+ try:
+ message = response.choices[0].message
+ if not hasattr(message, 'tool_calls') or not message.tool_calls:
+ return message.content
+
+ results = []
+ for tool_call in message.tool_calls:
+ try:
+ function_name = tool_call.function.name
+ arguments = json.loads(tool_call.function.arguments)
+
+ # Find the matching tool
+ tool = next((t for t in tools if t.__name__ == function_name), None)
+ if not tool:
+ display_error(f"Tool {function_name} not found")
+ continue
+
+ # Check if the tool is async
+ if asyncio.iscoroutinefunction(tool):
+ result = await tool(**arguments)
+ else:
+ # Run sync function in executor to avoid blocking
+ loop = asyncio.get_event_loop()
+ result = await loop.run_in_executor(None, lambda: tool(**arguments))
+
+ results.append(result)
+ except Exception as e:
+ display_error(f"Error executing tool {function_name}: {e}")
+ results.append(None)
+
+ # If we have results, format them into a response
+ if results:
+ formatted_results = "\n".join([str(r) for r in results if r is not None])
+ if formatted_results:
+ messages = [
+ {"role": "system", "content": self.system_prompt},
+ {"role": "assistant", "content": "Here are the tool results:"},
+ {"role": "user", "content": formatted_results + "\nPlease process these results and provide a final response."}
+ ]
+ try:
+ async_client = AsyncOpenAI()
+ final_response = await async_client.chat.completions.create(
+ model=self.llm,
+ messages=messages,
+ temperature=0.2
+ )
+ return final_response.choices[0].message.content
+ except Exception as e:
+ display_error(f"Error in final chat completion: {e}")
+ return formatted_results
+ return formatted_results
+ return None
+ except Exception as e:
+ display_error(f"Error in _achat_completion: {e}")
+ return None
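
The new achat/_achat_completion coroutines above give Agent an async counterpart to chat, backed by AsyncOpenAI. A minimal usage sketch, assuming Agent is re-exported from the top-level praisonaiagents package (the class itself lives in praisonaiagents/agent/agent.py per the RECORD below), that OPENAI_API_KEY is set, and that the agent fields and model name shown are purely illustrative:

    import asyncio
    from praisonaiagents import Agent  # assumed top-level export

    async def main():
        agent = Agent(
            name="Researcher",                  # illustrative values, not from the package
            role="Research assistant",
            goal="Answer questions concisely",
            backstory="A careful, concise research assistant.",
            llm="gpt-4o-mini",                  # any chat-completions model name
        )
        # achat mirrors chat but awaits AsyncOpenAI under the hood
        answer = await agent.achat("Explain async generators in one sentence.")
        print(answer)

    asyncio.run(main())
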
@@ -10,10 +10,8 @@ from rich.console import Console
  from ..main import display_error, TaskOutput, error_logs, client
  from ..agent.agent import Agent
  from ..task.task import Task
- from ..process.process import Process
-
- class LoopItems(BaseModel):
- items: List[Any]
+ from ..process.process import Process, LoopItems
+ import asyncio

  def encode_file_to_base64(file_path: str) -> str:
  """Base64-encode a file."""
@@ -85,7 +83,8 @@ class PraisonAIAgents:
  return True
  return len(agent_output.strip()) > 0

- def execute_task(self, task_id):
+ async def aexecute_task(self, task_id):
+ """Async version of execute_task method"""
  if task_id not in self.tasks:
  display_error(f"Error: Task with ID {task_id} does not exist")
  return
@@ -162,12 +161,12 @@ Expected Output: {task.expected_output}.
  })
  return content

- agent_output = executor_agent.chat(
+ agent_output = await executor_agent.achat(
  _get_multimodal_message(task_prompt, task.images),
  tools=task.tools
  )
  else:
- agent_output = executor_agent.chat(task_prompt, tools=task.tools)
+ agent_output = await executor_agent.achat(task_prompt, tools=task.tools)

  if agent_output:
  task_output = TaskOutput(
@@ -205,6 +204,83 @@ Expected Output: {task.expected_output}.
  task.status = "failed"
  return None

+ async def arun_task(self, task_id):
+ """Async version of run_task method"""
+ if task_id not in self.tasks:
+ display_error(f"Error: Task with ID {task_id} does not exist")
+ return
+ task = self.tasks[task_id]
+ if task.status == "completed":
+ logging.info(f"Task with ID {task_id} is already completed")
+ return
+
+ retries = 0
+ while task.status != "completed" and retries < self.max_retries:
+ logging.debug(f"Attempt {retries+1} for task {task_id}")
+ if task.status in ["not started", "in progress"]:
+ task_output = await self.aexecute_task(task_id)
+ if task_output and self.completion_checker(task, task_output.raw):
+ task.status = "completed"
+ if task.callback:
+ await task.execute_callback(task_output)
+ self.save_output_to_file(task, task_output)
+ if self.verbose >= 1:
+ logging.info(f"Task {task_id} completed successfully.")
+ else:
+ task.status = "in progress"
+ if self.verbose >= 1:
+ logging.info(f"Task {task_id} not completed, retrying")
+ await asyncio.sleep(1)
+ retries += 1
+ else:
+ if task.status == "failed":
+ logging.info("Task is failed, resetting to in-progress for another try...")
+ task.status = "in progress"
+ else:
+ logging.info("Invalid Task status")
+ break
+
+ if retries == self.max_retries and task.status != "completed":
+ logging.info(f"Task {task_id} failed after {self.max_retries} retries.")
+
+ async def arun_all_tasks(self):
+ """Async version of run_all_tasks method"""
+ process = Process(
+ tasks=self.tasks,
+ agents=self.agents,
+ manager_llm=self.manager_llm,
+ verbose=self.verbose
+ )
+
+ if self.process == "workflow":
+ async for task_id in process.aworkflow():
+ if self.tasks[task_id].async_execution:
+ await self.arun_task(task_id)
+ else:
+ self.run_task(task_id)
+ elif self.process == "sequential":
+ async for task_id in process.asequential():
+ if self.tasks[task_id].async_execution:
+ await self.arun_task(task_id)
+ else:
+ self.run_task(task_id)
+ elif self.process == "hierarchical":
+ async for task_id in process.ahierarchical():
+ if isinstance(task_id, Task):
+ task_id = self.add_task(task_id)
+ if self.tasks[task_id].async_execution:
+ await self.arun_task(task_id)
+ else:
+ self.run_task(task_id)
+
+ async def astart(self):
+ """Async version of start method"""
+ await self.arun_all_tasks()
+ return {
+ "task_status": self.get_all_tasks_status(),
+ "task_results": {task_id: self.get_task_result(task_id) for task_id in self.tasks}
+ }
+
  def save_output_to_file(self, task, task_output):
  if task.output_file:
  try:
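
astart is the new async entry point: it calls arun_all_tasks, which drives arun_task and aexecute_task and awaits achat for tasks whose async_execution flag is set. A minimal end-to-end sketch, assuming Agent, Task and PraisonAIAgents are top-level exports, that the orchestrator takes the usual agents=/tasks= constructor arguments, and that async_execution is accepted by the Task constructor (names and model are illustrative):

    import asyncio
    from praisonaiagents import Agent, Task, PraisonAIAgents  # assumed top-level exports

    async def main():
        writer = Agent(name="Writer", role="Copywriter", goal="Write short copy",
                       backstory="A concise technical writer.", llm="gpt-4o-mini")
        tagline = Task(name="tagline",
                       description="Write a one-line tagline for an async agents library",
                       expected_output="A single sentence",
                       agent=writer,
                       async_execution=True)  # assumed constructor flag; the attribute is checked in arun_all_tasks
        crew = PraisonAIAgents(agents=[writer], tasks=[tagline], process="sequential")
        # astart returns the dict built above: task_status plus per-task results
        result = await crew.astart()
        print(result["task_status"], result["task_results"])

    asyncio.run(main())
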
@@ -217,7 +293,129 @@ Expected Output: {task.expected_output}.
  except Exception as e:
  display_error(f"Error saving task output to file: {e}")

+ def execute_task(self, task_id):
+ """Synchronous version of execute_task method"""
+ if task_id not in self.tasks:
+ display_error(f"Error: Task with ID {task_id} does not exist")
+ return
+ task = self.tasks[task_id]
+
+ # Only import multimodal dependencies if task has images
+ if task.images and task.status == "not started":
+ try:
+ import cv2
+ import base64
+ from moviepy import VideoFileClip
+ except ImportError as e:
+ display_error(f"Error: Missing required dependencies for image/video processing: {e}")
+ display_error("Please install with: pip install opencv-python moviepy")
+ task.status = "failed"
+ return None
+
+ if task.status == "not started":
+ task.status = "in progress"
+
+ executor_agent = task.agent
+
+ task_prompt = f"""
+ You need to do the following task: {task.description}.
+ Expected Output: {task.expected_output}.
+ """
+ if task.context:
+ context_results = ""
+ for context_task in task.context:
+ if context_task.result:
+ context_results += f"Result of previous task {context_task.name if context_task.name else context_task.description}: {context_task.result.raw}\n"
+ else:
+ context_results += f"Previous task {context_task.name if context_task.name else context_task.description} had no result.\n"
+ task_prompt += f"""
+ Here are the results of previous tasks that might be useful:\n
+ {context_results}
+ """
+ task_prompt += "Please provide only the final result of your work. Do not add any conversation or extra explanation."
+
+ if self.verbose >= 2:
+ logging.info(f"Executing task {task_id}: {task.description} using {executor_agent.name}")
+ logging.debug(f"Starting execution of task {task_id} with prompt:\n{task_prompt}")
+
+ if task.images:
+ def _get_multimodal_message(text_prompt, images):
+ content = [{"type": "text", "text": text_prompt}]
+
+ for img in images:
+ # If local file path for a valid image
+ if os.path.exists(img):
+ ext = os.path.splitext(img)[1].lower()
+ # If it's a .mp4, convert to frames
+ if ext == ".mp4":
+ frames = process_video(img, seconds_per_frame=1)
+ content.append({"type": "text", "text": "These are frames from the video."})
+ for f in frames:
+ content.append({
+ "type": "image_url",
+ "image_url": {"url": f"data:image/jpg;base64,{f}"}
+ })
+ else:
+ encoded = encode_file_to_base64(img)
+ content.append({
+ "type": "image_url",
+ "image_url": {
+ "url": f"data:image/{ext.lstrip('.')};base64,{encoded}"
+ }
+ })
+ else:
+ # Treat as a remote URL
+ content.append({
+ "type": "image_url",
+ "image_url": {"url": img}
+ })
+ return content
+
+ agent_output = executor_agent.chat(
+ _get_multimodal_message(task_prompt, task.images),
+ tools=task.tools
+ )
+ else:
+ agent_output = executor_agent.chat(task_prompt, tools=task.tools)
+
+ if agent_output:
+ task_output = TaskOutput(
+ description=task.description,
+ summary=task.description[:10],
+ raw=agent_output,
+ agent=executor_agent.name,
+ output_format="RAW"
+ )
+
+ if task.output_json:
+ cleaned = self.clean_json_output(agent_output)
+ try:
+ parsed = json.loads(cleaned)
+ task_output.json_dict = parsed
+ task_output.output_format = "JSON"
+ except:
+ logging.warning(f"Warning: Could not parse output of task {task_id} as JSON")
+ logging.debug(f"Output that failed JSON parsing: {agent_output}")
+
+ if task.output_pydantic:
+ cleaned = self.clean_json_output(agent_output)
+ try:
+ parsed = json.loads(cleaned)
+ pyd_obj = task.output_pydantic(**parsed)
+ task_output.pydantic = pyd_obj
+ task_output.output_format = "Pydantic"
+ except:
+ logging.warning(f"Warning: Could not parse output of task {task_id} as Pydantic Model")
+ logging.debug(f"Output that failed Pydantic parsing: {agent_output}")
+
+ task.result = task_output
+ return task_output
+ else:
+ task.status = "failed"
+ return None
+
  def run_task(self, task_id):
+ """Synchronous version of run_task method"""
  if task_id not in self.tasks:
  display_error(f"Error: Task with ID {task_id} does not exist")
  return
@@ -256,7 +454,7 @@ Expected Output: {task.expected_output}.
  logging.info(f"Task {task_id} failed after {self.max_retries} retries.")

  def run_all_tasks(self):
- """Execute tasks based on execution mode"""
+ """Synchronous version of run_all_tasks method"""
  process = Process(
  tasks=self.tasks,
  agents=self.agents,
@@ -1,11 +1,13 @@
  import logging
- from typing import Dict, Optional, List
+ import asyncio
+ from typing import Dict, Optional, List, Any, AsyncGenerator
  from pydantic import BaseModel
  from ..agent.agent import Agent
  from ..task.task import Task
+ from ..main import display_error, client

  class LoopItems(BaseModel):
- items: List[any]
+ items: List[Any]

  class Process:
  def __init__(self, tasks: Dict[str, Task], agents: List[Agent], manager_llm: Optional[str] = None, verbose: bool = False):
@@ -14,7 +16,275 @@ class Process:
  self.manager_llm = manager_llm
  self.verbose = verbose

+ async def aworkflow(self) -> AsyncGenerator[str, None]:
+ """Async version of workflow method"""
+ # Build workflow relationships first
+ for task in self.tasks.values():
+ if task.next_tasks:
+ for next_task_name in task.next_tasks:
+ next_task = next((t for t in self.tasks.values() if t.name == next_task_name), None)
+ if next_task:
+ next_task.previous_tasks.append(task.name)
+
+ # Find start task
+ start_task = None
+ for task_id, task in self.tasks.items():
+ if task.is_start:
+ start_task = task
+ break
+
+ if not start_task:
+ start_task = list(self.tasks.values())[0]
+ logging.info("No start task marked, using first task")
+
+ current_task = start_task
+ visited_tasks = set()
+ loop_data = {} # Store loop-specific data
+
+ while current_task and current_task.id not in visited_tasks:
+ task_id = current_task.id
+ logging.info(f"Executing workflow task: {current_task.name if current_task.name else task_id}")
+
+ # Add context from previous tasks to description
+ if current_task.previous_tasks or current_task.context:
+ context = "\nInput data from previous tasks:"
+
+ # Add data from previous tasks in workflow
+ for prev_name in current_task.previous_tasks:
+ prev_task = next((t for t in self.tasks.values() if t.name == prev_name), None)
+ if prev_task and prev_task.result:
+ # Handle loop data
+ if current_task.task_type == "loop":
+ # create a loop manager Agent
+ loop_manager = Agent(
+ name="Loop Manager",
+ role="Loop data processor",
+ goal="Process loop data and convert it to list format",
+ backstory="Expert at handling loop data and converting it to proper format",
+ llm=self.manager_llm,
+ verbose=self.verbose,
+ markdown=True
+ )
+
+ # get the loop data convert it to list using calling Agent class chat
+ loop_prompt = f"""
+ Process this data into a list format:
+ {prev_task.result.raw}
+
+ Return a JSON object with an 'items' array containing the items to process.
+ """
+ if current_task.async_execution:
+ loop_data_str = await loop_manager.achat(
+ prompt=loop_prompt,
+ output_json=LoopItems
+ )
+ else:
+ loop_data_str = loop_manager.chat(
+ prompt=loop_prompt,
+ output_json=LoopItems
+ )
+
+ try:
+ # The response will already be parsed into LoopItems model
+ loop_data[f"loop_{current_task.name}"] = {
+ "items": loop_data_str.items,
+ "index": 0,
+ "remaining": len(loop_data_str.items)
+ }
+ context += f"\nCurrent loop item: {loop_data_str.items[0]}"
+ except Exception as e:
+ display_error(f"Failed to process loop data: {e}")
+ context += f"\n{prev_name}: {prev_task.result.raw}"
+ else:
+ context += f"\n{prev_name}: {prev_task.result.raw}"
+
+ # Add data from context tasks
+ if current_task.context:
+ for ctx_task in current_task.context:
+ if ctx_task.result and ctx_task.name != current_task.name:
+ context += f"\n{ctx_task.name}: {ctx_task.result.raw}"
+
+ # Update task description with context
+ current_task.description = current_task.description + context
+
+ # Execute task using existing run_task method
+ yield task_id
+ visited_tasks.add(task_id)
+
+ # Handle loop progression
+ if current_task.task_type == "loop":
+ loop_key = f"loop_{current_task.name}"
+ if loop_key in loop_data:
+ loop_info = loop_data[loop_key]
+ loop_info["index"] += 1
+ has_more = loop_info["remaining"] > 0
+
+ # Update result to trigger correct condition
+ if current_task.result:
+ result = current_task.result.raw
+ if has_more:
+ result += "\nmore"
+ else:
+ result += "\ndone"
+ current_task.result.raw = result
+
+ # Determine next task based on result
+ next_task = None
+ if current_task.result:
+ if current_task.task_type in ["decision", "loop"]:
+ result = current_task.result.raw.lower()
+ # Check conditions
+ for condition, tasks in current_task.condition.items():
+ if condition.lower() in result and tasks:
+ next_task_name = tasks[0]
+ next_task = next((t for t in self.tasks.values() if t.name == next_task_name), None)
+ # For loops, allow revisiting the same task
+ if next_task and next_task.id == current_task.id:
+ visited_tasks.discard(current_task.id)
+ break
+
+ if not next_task and current_task.next_tasks:
+ next_task_name = current_task.next_tasks[0]
+ next_task = next((t for t in self.tasks.values() if t.name == next_task_name), None)
+
+ current_task = next_task
+ if not current_task:
+ logging.info("Workflow execution completed")
+ break
+
+ async def asequential(self) -> AsyncGenerator[str, None]:
+ """Async version of sequential method"""
+ for task_id in self.tasks:
+ if self.tasks[task_id].status != "completed":
+ yield task_id
+
+ async def ahierarchical(self) -> AsyncGenerator[str, None]:
+ """Async version of hierarchical method"""
+ logging.debug(f"Starting hierarchical task execution with {len(self.tasks)} tasks")
+ manager_agent = Agent(
+ name="Manager",
+ role="Project manager",
+ goal="Manage the entire flow of tasks and delegate them to the right agent",
+ backstory="Expert project manager to coordinate tasks among agents",
+ llm=self.manager_llm,
+ verbose=self.verbose,
+ markdown=True,
+ self_reflect=False
+ )
+
+ class ManagerInstructions(BaseModel):
+ task_id: int
+ agent_name: str
+ action: str
+
+ manager_task = Task(
+ name="manager_task",
+ description="Decide the order of tasks and which agent executes them",
+ expected_output="All tasks completed successfully",
+ agent=manager_agent
+ )
+ manager_task_id = yield manager_task
+ logging.info(f"Created manager task with ID {manager_task_id}")
+
+ completed_count = 0
+ total_tasks = len(self.tasks) - 1
+ logging.info(f"Need to complete {total_tasks} tasks (excluding manager task)")
+
+ while completed_count < total_tasks:
+ tasks_summary = []
+ for tid, tk in self.tasks.items():
+ if tk.name == "manager_task":
+ continue
+ task_info = {
+ "task_id": tid,
+ "name": tk.name,
+ "description": tk.description,
+ "status": tk.status if tk.status else "not started",
+ "agent": tk.agent.name if tk.agent else "No agent"
+ }
+ tasks_summary.append(task_info)
+ logging.info(f"Task {tid} status: {task_info}")
+
+ manager_prompt = f"""
+ Here is the current status of all tasks except yours (manager_task):
+ {tasks_summary}
+
+ Provide a JSON with the structure:
+ {{
+ "task_id": <int>,
+ "agent_name": "<string>",
+ "action": "<execute or stop>"
+ }}
+ """
+
+ try:
+ logging.info("Requesting manager instructions...")
+ if manager_task.async_execution:
+ manager_response = await client.beta.chat.completions.parse(
+ model=self.manager_llm,
+ messages=[
+ {"role": "system", "content": manager_task.description},
+ {"role": "user", "content": manager_prompt}
+ ],
+ temperature=0.7,
+ response_format=ManagerInstructions
+ )
+ else:
+ manager_response = client.beta.chat.completions.parse(
+ model=self.manager_llm,
+ messages=[
+ {"role": "system", "content": manager_task.description},
+ {"role": "user", "content": manager_prompt}
+ ],
+ temperature=0.7,
+ response_format=ManagerInstructions
+ )
+ parsed_instructions = manager_response.choices[0].message.parsed
+ logging.info(f"Manager instructions: {parsed_instructions}")
+ except Exception as e:
+ display_error(f"Manager parse error: {e}")
+ logging.error(f"Manager parse error: {str(e)}", exc_info=True)
+ break
+
+ selected_task_id = parsed_instructions.task_id
+ selected_agent_name = parsed_instructions.agent_name
+ action = parsed_instructions.action
+
+ logging.info(f"Manager selected task_id={selected_task_id}, agent={selected_agent_name}, action={action}")
+
+ if action.lower() == "stop":
+ logging.info("Manager decided to stop task execution")
+ break
+
+ if selected_task_id not in self.tasks:
+ error_msg = f"Manager selected invalid task id {selected_task_id}"
+ display_error(error_msg)
+ logging.error(error_msg)
+ break
+
+ original_agent = self.tasks[selected_task_id].agent.name if self.tasks[selected_task_id].agent else "None"
+ for a in self.agents:
+ if a.name == selected_agent_name:
+ self.tasks[selected_task_id].agent = a
+ logging.info(f"Changed agent for task {selected_task_id} from {original_agent} to {selected_agent_name}")
+ break
+
+ if self.tasks[selected_task_id].status != "completed":
+ logging.info(f"Starting execution of task {selected_task_id}")
+ yield selected_task_id
+ logging.info(f"Finished execution of task {selected_task_id}, status: {self.tasks[selected_task_id].status}")
+
+ if self.tasks[selected_task_id].status == "completed":
+ completed_count += 1
+ logging.info(f"Task {selected_task_id} completed. Total completed: {completed_count}/{total_tasks}")
+
+ self.tasks[manager_task.id].status = "completed"
+ if self.verbose >= 1:
+ logging.info("All tasks completed under manager supervision.")
+ logging.info("Hierarchical task execution finished")
+
  def workflow(self):
+ """Synchronous version of workflow method"""
  # Build workflow relationships first
  for task in self.tasks.values():
  if task.next_tasks:
@@ -144,11 +414,13 @@ Return a JSON object with an 'items' array containing the items to process.
  break

  def sequential(self):
+ """Synchronous version of sequential method"""
  for task_id in self.tasks:
  if self.tasks[task_id].status != "completed":
  yield task_id

  def hierarchical(self):
+ """Synchronous version of hierarchical method"""
  logging.debug(f"Starting hierarchical task execution with {len(self.tasks)} tasks")
  manager_agent = Agent(
  name="Manager",
@@ -1,5 +1,6 @@
  import logging
- from typing import List, Optional, Dict, Any, Type, Callable, Union
+ import asyncio
+ from typing import List, Optional, Dict, Any, Type, Callable, Union, Coroutine
  from pydantic import BaseModel
  from ..main import TaskOutput
  from ..agent.agent import Agent
@@ -19,7 +20,7 @@ class Task:
  output_file: Optional[str] = None,
  output_json: Optional[Type[BaseModel]] = None,
  output_pydantic: Optional[Type[BaseModel]] = None,
- callback: Optional[Any] = None,
+ callback: Optional[Union[Callable[[TaskOutput], Any], Callable[[TaskOutput], Coroutine[Any, Any, Any]]]] = None,
  status: str = "not started",
  result: Optional[TaskOutput] = None,
  create_directory: Optional[bool] = False,
@@ -61,4 +62,13 @@ class Task:
  self.previous_tasks = []

  def __str__(self):
- return f"Task(name='{self.name if self.name else 'None'}', description='{self.description}', agent='{self.agent.name if self.agent else 'None'}', status='{self.status}')"
+ return f"Task(name='{self.name if self.name else 'None'}', description='{self.description}', agent='{self.agent.name if self.agent else 'None'}', status='{self.status}')"
+
+ async def execute_callback(self, task_output: TaskOutput) -> None:
+ """Execute the callback function, handling both sync and async callbacks"""
+ if self.callback:
+ if asyncio.iscoroutinefunction(self.callback):
+ await self.callback(task_output)
+ else:
+ loop = asyncio.get_event_loop()
+ await loop.run_in_executor(None, self.callback, task_output)
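
execute_callback lets a Task take either a plain function or a coroutine function as its callback, matching the widened type hint above; arun_task awaits it once a task completes. A small sketch of both callback styles (the bodies are illustrative; the TaskOutput fields used are the ones populated by execute_task above):

    from praisonaiagents.main import TaskOutput  # agents.py imports TaskOutput from ..main

    async def notify(task_output: TaskOutput) -> None:
        # coroutine callback: awaited directly by Task.execute_callback
        print(f"[async] {task_output.agent}: {task_output.raw}")

    def log_result(task_output: TaskOutput) -> None:
        # sync callback: run via loop.run_in_executor so the event loop is not blocked
        print(f"[sync] {task_output.description} -> {task_output.output_format}")

    # Either callable can be passed as Task(..., callback=notify) or Task(..., callback=log_result).
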
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: praisonaiagents
- Version: 0.0.15
+ Version: 0.0.17
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Dist: pydantic
@@ -1,9 +1,9 @@
  praisonaiagents/__init__.py,sha256=gI8vEabBTRPsE_E8GA5sBMi4sTtJI-YokPrH2Nor-k0,741
  praisonaiagents/main.py,sha256=K2OxVKPmo4dNJbSWIsXDi_hm9CRx5O4km_74UGcszhk,5744
  praisonaiagents/agent/__init__.py,sha256=sKO8wGEXvtCrvV1e834r1Okv0XAqAxqZCqz6hKLiTvA,79
- praisonaiagents/agent/agent.py,sha256=zTYcDpJ5DzzBnefwLvhrtBlGQoRI4ZZAioDu5nKTPSs,24042
+ praisonaiagents/agent/agent.py,sha256=_UmUWGbZjd3tApPX2T6RPB5Pll3Gos97XBhhg_zmfn8,30662
  praisonaiagents/agents/__init__.py,sha256=7RDeQNSqZg5uBjD4M_0p_F6YgfWuDuxPFydPU50kDYc,120
- praisonaiagents/agents/agents.py,sha256=ngPFNTmv3whf22litkQacUGaRghX-NbLPAC6Ejy9qoU,13003
+ praisonaiagents/agents/agents.py,sha256=P8JJ1849-djMDkMuP0kNhPwtg97L8gO60jYXzXFcPc0,21762
  praisonaiagents/build/lib/praisonaiagents/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
  praisonaiagents/build/lib/praisonaiagents/main.py,sha256=zDhN5KKtKbfruolDNxlyJkcFlkSt4KQkQTDRfQVAhxc,3960
  praisonaiagents/build/lib/praisonaiagents/agent/__init__.py,sha256=sKO8wGEXvtCrvV1e834r1Okv0XAqAxqZCqz6hKLiTvA,79
@@ -13,10 +13,10 @@ praisonaiagents/build/lib/praisonaiagents/agents/agents.py,sha256=P2FAtlfD3kPib5
  praisonaiagents/build/lib/praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
  praisonaiagents/build/lib/praisonaiagents/task/task.py,sha256=4Y1qX8OeEFcid2yhAiPYylvHpuDmWORsyNL16_BiVvI,1831
  praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
- praisonaiagents/process/process.py,sha256=BgtFgTQjLoqHzj97zDtALjuP_ciOErMDB9quDTlZFjg,11676
+ praisonaiagents/process/process.py,sha256=4qXdrCDQPH5MtvHvdJVURXKNgSl6ae3OYTiqAF_A2ZU,24295
  praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
- praisonaiagents/task/task.py,sha256=oMC5Zz1dMj0Ceice69aBS1KQQXMLqphc8wNOQ9zcu0Q,2570
- praisonaiagents-0.0.15.dist-info/METADATA,sha256=QERXlBCD9drRfFrI5GwbLzbIaLX_FnOM9UVxFwIwDBo,233
- praisonaiagents-0.0.15.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
- praisonaiagents-0.0.15.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
- praisonaiagents-0.0.15.dist-info/RECORD,,
+ praisonaiagents/task/task.py,sha256=UiiWgLDOdX_w0opP8h8-u-leVZlq1CkpGUmf7L2qyJs,3110
+ praisonaiagents-0.0.17.dist-info/METADATA,sha256=oh0bnTHpGDwnn15L-t8v4kdApFBwOZ_BKj24mGnJUqA,233
+ praisonaiagents-0.0.17.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+ praisonaiagents-0.0.17.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+ praisonaiagents-0.0.17.dist-info/RECORD,,