praisonaiagents 0.0.13__py3-none-any.whl → 0.0.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- praisonaiagents/agent/agent.py +52 -8
- praisonaiagents/agents/agents.py +41 -117
- praisonaiagents/process/__init__.py +3 -0
- praisonaiagents/process/process.py +262 -0
- praisonaiagents/task/task.py +18 -4
- {praisonaiagents-0.0.13.dist-info → praisonaiagents-0.0.15.dist-info}/METADATA +1 -1
- {praisonaiagents-0.0.13.dist-info → praisonaiagents-0.0.15.dist-info}/RECORD +9 -7
- {praisonaiagents-0.0.13.dist-info → praisonaiagents-0.0.15.dist-info}/WHEEL +0 -0
- {praisonaiagents-0.0.13.dist-info → praisonaiagents-0.0.15.dist-info}/top_level.txt +0 -0
praisonaiagents/agent/agent.py
CHANGED

@@ -353,13 +353,28 @@ class Agent:
 Your Role: {self.role}\n
 Your Goal: {self.goal}
 """
+            if output_json:
+                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {output_json.schema_json()}"
         else:
             system_prompt = None
-
+
         messages = []
         if system_prompt:
             messages.append({"role": "system", "content": system_prompt})
         messages.extend(self.chat_history)
+
+        # Modify prompt if output_json is specified
+        original_prompt = prompt
+        if output_json:
+            if isinstance(prompt, str):
+                prompt += "\nReturn ONLY a valid JSON object. No other text or explanation."
+            elif isinstance(prompt, list):
+                # For multimodal prompts, append to the text content
+                for item in prompt:
+                    if item["type"] == "text":
+                        item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
+                        break
+
         if isinstance(prompt, list):
             # If we receive a multimodal prompt list, place it directly in the user message
             messages.append({"role": "user", "content": prompt})

@@ -385,13 +400,14 @@ Your Goal: {self.goal}
             response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None)
             if not response:
                 return None
-
+
             tool_calls = getattr(response.choices[0].message, 'tool_calls', None)
+            response_text = response.choices[0].message.content.strip()

             if tool_calls:
                 messages.append({
                     "role": "assistant",
-                    "content":
+                    "content": response_text,
                     "tool_calls": tool_calls
                 })

@@ -423,15 +439,31 @@ Your Goal: {self.goal}
             if not response:
                 return None
             response_text = response.choices[0].message.content.strip()
-
-
+
+            # Handle output_json if specified
+            if output_json:
+                try:
+                    # Clean the response text to get only JSON
+                    cleaned_json = self.clean_json_output(response_text)
+                    # Parse into Pydantic model
+                    parsed_model = output_json.model_validate_json(cleaned_json)
+                    # Add to chat history and return
+                    self.chat_history.append({"role": "user", "content": original_prompt})
+                    self.chat_history.append({"role": "assistant", "content": response_text})
+                    if self.verbose:
+                        display_interaction(original_prompt, response_text, markdown=self.markdown,
+                                            generation_time=time.time() - start_time, console=self.console)
+                    return parsed_model
+                except Exception as e:
+                    display_error(f"Failed to parse response as {output_json.__name__}: {e}")
+                    return None

             if not self.self_reflect:
-                self.chat_history.append({"role": "user", "content":
+                self.chat_history.append({"role": "user", "content": original_prompt})
                 self.chat_history.append({"role": "assistant", "content": response_text})
                 if self.verbose:
                     logging.info(f"Agent {self.name} final response: {response_text}")
-                display_interaction(
+                display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
                 return response_text

             reflection_prompt = f"""

@@ -492,4 +524,16 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

         except Exception as e:
             display_error(f"Error in chat: {e}", console=self.console)
-            return None
+            return None
+
+    def clean_json_output(self, output: str) -> str:
+        """Clean and extract JSON from response text."""
+        cleaned = output.strip()
+        # Remove markdown code blocks if present
+        if cleaned.startswith("```json"):
+            cleaned = cleaned[len("```json"):].strip()
+        if cleaned.startswith("```"):
+            cleaned = cleaned[len("```"):].strip()
+        if cleaned.endswith("```"):
+            cleaned = cleaned[:-3].strip()
+        return cleaned
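The change above adds an output_json parameter to Agent.chat(): the prompt is amended with JSON-only instructions, the reply is stripped of markdown fences by clean_json_output(), and the parsed Pydantic model is returned. A minimal usage sketch, assuming Agent is exported at the package root; the model name and CityFacts schema are illustrative only:

from pydantic import BaseModel
from praisonaiagents import Agent  # assumed package-root export

# Hypothetical response schema, used only for illustration.
class CityFacts(BaseModel):
    city: str
    population: int

agent = Agent(
    name="Researcher",
    role="City researcher",
    goal="Provide structured facts about cities",
    backstory="Knows basic world geography",
    llm="gpt-4o-mini",   # illustrative model name
    self_reflect=False
)

# chat() appends "Return ONLY a valid JSON object" to the prompt, cleans the
# reply with clean_json_output(), and returns a CityFacts instance (or None on parse failure).
facts = agent.chat("Give facts about Paris", output_json=CityFacts)
if facts:
    print(facts.city, facts.population)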
praisonaiagents/agents/agents.py
CHANGED

@@ -2,7 +2,7 @@ import os
 import time
 import json
 import logging
-from typing import Any, Dict, Optional
+from typing import Any, Dict, Optional, List
 from pydantic import BaseModel
 from rich.text import Text
 from rich.panel import Panel

@@ -10,6 +10,10 @@ from rich.console import Console
 from ..main import display_error, TaskOutput, error_logs, client
 from ..agent.agent import Agent
 from ..task.task import Task
+from ..process.process import Process
+
+class LoopItems(BaseModel):
+    items: List[Any]

 def encode_file_to_base64(file_path: str) -> str:
     """Base64-encode a file."""

@@ -55,6 +59,7 @@ class PraisonAIAgents:
         for task in tasks:
             self.add_task(task)
             task.status = "not started"
+        self._state = {}  # Add state storage at PraisonAIAgents level

     def add_task(self, task):
         task_id = self.task_id_counter

@@ -251,122 +256,25 @@ Expected Output: {task.expected_output}.
             logging.info(f"Task {task_id} failed after {self.max_retries} retries.")

     def run_all_tasks(self):
-
-
-
-
+        """Execute tasks based on execution mode"""
+        process = Process(
+            tasks=self.tasks,
+            agents=self.agents,
+            manager_llm=self.manager_llm,
+            verbose=self.verbose
+        )
+
+        if self.process == "workflow":
+            for task_id in process.workflow():
+                self.run_task(task_id)
+        elif self.process == "sequential":
+            for task_id in process.sequential():
+                self.run_task(task_id)
         elif self.process == "hierarchical":
-
-
-
-
-                goal="Manage the entire flow of tasks and delegate them to the right agent",
-                backstory="Expert project manager to coordinate tasks among agents",
-                llm=self.manager_llm,
-                verbose=self.verbose,
-                markdown=True,
-                self_reflect=False
-            )
-
-            class ManagerInstructions(BaseModel):
-                task_id: int
-                agent_name: str
-                action: str
-
-            manager_task = Task(
-                name="manager_task",
-                description="Decide the order of tasks and which agent executes them",
-                expected_output="All tasks completed successfully",
-                agent=manager_agent
-            )
-            manager_task_id = self.add_task(manager_task)
-            logging.info(f"Created manager task with ID {manager_task_id}")
-
-            completed_count = 0
-            total_tasks = len(self.tasks) - 1
-            logging.info(f"Need to complete {total_tasks} tasks (excluding manager task)")
-
-            while completed_count < total_tasks:
-                tasks_summary = []
-                for tid, tk in self.tasks.items():
-                    if tk.name == "manager_task":
-                        continue
-                    task_info = {
-                        "task_id": tid,
-                        "name": tk.name,
-                        "description": tk.description,
-                        "status": tk.status if tk.status else "not started",
-                        "agent": tk.agent.name if tk.agent else "No agent"
-                    }
-                    tasks_summary.append(task_info)
-                    logging.info(f"Task {tid} status: {task_info}")
-
-                manager_prompt = f"""
-Here is the current status of all tasks except yours (manager_task):
-{tasks_summary}
-
-Provide a JSON with the structure:
-{{
-  "task_id": <int>,
-  "agent_name": "<string>",
-  "action": "<execute or stop>"
-}}
-"""
-
-                try:
-                    logging.info("Requesting manager instructions...")
-                    manager_response = client.beta.chat.completions.parse(
-                        model=self.manager_llm,
-                        messages=[
-                            {"role": "system", "content": manager_task.description},
-                            {"role": "user", "content": manager_prompt}
-                        ],
-                        temperature=0.7,
-                        response_format=ManagerInstructions
-                    )
-                    parsed_instructions = manager_response.choices[0].message.parsed
-                    logging.info(f"Manager instructions: {parsed_instructions}")
-                except Exception as e:
-                    display_error(f"Manager parse error: {e}")
-                    logging.error(f"Manager parse error: {str(e)}", exc_info=True)
-                    break
-
-                selected_task_id = parsed_instructions.task_id
-                selected_agent_name = parsed_instructions.agent_name
-                action = parsed_instructions.action
-
-                logging.info(f"Manager selected task_id={selected_task_id}, agent={selected_agent_name}, action={action}")
-
-                if action.lower() == "stop":
-                    logging.info("Manager decided to stop task execution")
-                    break
-
-                if selected_task_id not in self.tasks:
-                    error_msg = f"Manager selected invalid task id {selected_task_id}"
-                    display_error(error_msg)
-                    logging.error(error_msg)
-                    break
-
-                original_agent = self.tasks[selected_task_id].agent.name if self.tasks[selected_task_id].agent else "None"
-                for a in self.agents:
-                    if a.name == selected_agent_name:
-                        self.tasks[selected_task_id].agent = a
-                        logging.info(f"Changed agent for task {selected_task_id} from {original_agent} to {selected_agent_name}")
-                        break
-
-                if self.tasks[selected_task_id].status != "completed":
-                    logging.info(f"Starting execution of task {selected_task_id}")
-                    self.run_task(selected_task_id)
-                    logging.info(f"Finished execution of task {selected_task_id}, status: {self.tasks[selected_task_id].status}")
-
-                if self.tasks[selected_task_id].status == "completed":
-                    completed_count += 1
-                    logging.info(f"Task {selected_task_id} completed. Total completed: {completed_count}/{total_tasks}")
-
-            self.tasks[manager_task.id].status = "completed"
-            if self.verbose >= 1:
-                logging.info("All tasks completed under manager supervision.")
-            logging.info("Hierarchical task execution finished")
+            for task_id in process.hierarchical():
+                if isinstance(task_id, Task):
+                    task_id = self.add_task(task_id)
+                self.run_task(task_id)

     def get_task_status(self, task_id):
         if task_id in self.tasks:

@@ -397,4 +305,20 @@ Provide a JSON with the structure:
         return {
             "task_status": self.get_all_tasks_status(),
             "task_results": {task_id: self.get_task_result(task_id) for task_id in self.tasks}
-        }
+        }
+
+    def set_state(self, key: str, value: Any) -> None:
+        """Set a state value"""
+        self._state[key] = value
+
+    def get_state(self, key: str, default: Any = None) -> Any:
+        """Get a state value"""
+        return self._state.get(key, default)
+
+    def update_state(self, updates: Dict) -> None:
+        """Update multiple state values"""
+        self._state.update(updates)
+
+    def clear_state(self) -> None:
+        """Clear all state values"""
+        self._state.clear()
praisonaiagents/process/process.py
ADDED

@@ -0,0 +1,262 @@
+import logging
+from typing import Dict, Optional, List
+from pydantic import BaseModel
+from ..agent.agent import Agent
+from ..task.task import Task
+
+class LoopItems(BaseModel):
+    items: List[any]
+
+class Process:
+    def __init__(self, tasks: Dict[str, Task], agents: List[Agent], manager_llm: Optional[str] = None, verbose: bool = False):
+        self.tasks = tasks
+        self.agents = agents
+        self.manager_llm = manager_llm
+        self.verbose = verbose
+
+    def workflow(self):
+        # Build workflow relationships first
+        for task in self.tasks.values():
+            if task.next_tasks:
+                for next_task_name in task.next_tasks:
+                    next_task = next((t for t in self.tasks.values() if t.name == next_task_name), None)
+                    if next_task:
+                        next_task.previous_tasks.append(task.name)
+
+        # Find start task
+        start_task = None
+        for task_id, task in self.tasks.items():
+            if task.is_start:
+                start_task = task
+                break
+
+        if not start_task:
+            start_task = list(self.tasks.values())[0]
+            logging.info("No start task marked, using first task")
+
+        current_task = start_task
+        visited_tasks = set()
+        loop_data = {}  # Store loop-specific data
+
+        while current_task and current_task.id not in visited_tasks:
+            task_id = current_task.id
+            logging.info(f"Executing workflow task: {current_task.name if current_task.name else task_id}")
+
+            # Add context from previous tasks to description
+            if current_task.previous_tasks or current_task.context:
+                context = "\nInput data from previous tasks:"
+
+                # Add data from previous tasks in workflow
+                for prev_name in current_task.previous_tasks:
+                    prev_task = next((t for t in self.tasks.values() if t.name == prev_name), None)
+                    if prev_task and prev_task.result:
+                        # Handle loop data
+                        if current_task.task_type == "loop":
+                            # create a loop manager Agent
+                            loop_manager = Agent(
+                                name="Loop Manager",
+                                role="Loop data processor",
+                                goal="Process loop data and convert it to list format",
+                                backstory="Expert at handling loop data and converting it to proper format",
+                                llm=self.manager_llm,
+                                verbose=self.verbose,
+                                markdown=True
+                            )
+
+                            # get the loop data convert it to list using calling Agent class chat
+                            loop_prompt = f"""
+Process this data into a list format:
+{prev_task.result.raw}
+
+Return a JSON object with an 'items' array containing the items to process.
+"""
+                            loop_data_str = loop_manager.chat(
+                                prompt=loop_prompt,
+                                output_json=LoopItems
+                            )
+
+                            try:
+                                # The response will already be parsed into LoopItems model
+                                loop_data[f"loop_{current_task.name}"] = {
+                                    "items": loop_data_str.items,
+                                    "index": 0,
+                                    "remaining": len(loop_data_str.items)
+                                }
+                                context += f"\nCurrent loop item: {loop_data_str.items[0]}"
+                            except Exception as e:
+                                display_error(f"Failed to process loop data: {e}")
+                                context += f"\n{prev_name}: {prev_task.result.raw}"
+                        else:
+                            context += f"\n{prev_name}: {prev_task.result.raw}"
+
+                # Add data from context tasks
+                if current_task.context:
+                    for ctx_task in current_task.context:
+                        if ctx_task.result and ctx_task.name != current_task.name:
+                            context += f"\n{ctx_task.name}: {ctx_task.result.raw}"
+
+                # Update task description with context
+                current_task.description = current_task.description + context
+
+            # Execute task using existing run_task method
+            yield task_id
+            visited_tasks.add(task_id)
+
+            # Handle loop progression
+            if current_task.task_type == "loop":
+                loop_key = f"loop_{current_task.name}"
+                if loop_key in loop_data:
+                    loop_info = loop_data[loop_key]
+                    loop_info["index"] += 1
+                    has_more = loop_info["remaining"] > 0
+
+                    # Update result to trigger correct condition
+                    if current_task.result:
+                        result = current_task.result.raw
+                        if has_more:
+                            result += "\nmore"
+                        else:
+                            result += "\ndone"
+                        current_task.result.raw = result
+
+            # Determine next task based on result
+            next_task = None
+            if current_task.result:
+                if current_task.task_type in ["decision", "loop"]:
+                    result = current_task.result.raw.lower()
+                    # Check conditions
+                    for condition, tasks in current_task.condition.items():
+                        if condition.lower() in result and tasks:
+                            next_task_name = tasks[0]
+                            next_task = next((t for t in self.tasks.values() if t.name == next_task_name), None)
+                            # For loops, allow revisiting the same task
+                            if next_task and next_task.id == current_task.id:
+                                visited_tasks.discard(current_task.id)
+                            break
+
+            if not next_task and current_task.next_tasks:
+                next_task_name = current_task.next_tasks[0]
+                next_task = next((t for t in self.tasks.values() if t.name == next_task_name), None)
+
+            current_task = next_task
+            if not current_task:
+                logging.info("Workflow execution completed")
+                break
+
+    def sequential(self):
+        for task_id in self.tasks:
+            if self.tasks[task_id].status != "completed":
+                yield task_id
+
+    def hierarchical(self):
+        logging.debug(f"Starting hierarchical task execution with {len(self.tasks)} tasks")
+        manager_agent = Agent(
+            name="Manager",
+            role="Project manager",
+            goal="Manage the entire flow of tasks and delegate them to the right agent",
+            backstory="Expert project manager to coordinate tasks among agents",
+            llm=self.manager_llm,
+            verbose=self.verbose,
+            markdown=True,
+            self_reflect=False
+        )
+
+        class ManagerInstructions(BaseModel):
+            task_id: int
+            agent_name: str
+            action: str
+
+        manager_task = Task(
+            name="manager_task",
+            description="Decide the order of tasks and which agent executes them",
+            expected_output="All tasks completed successfully",
+            agent=manager_agent
+        )
+        manager_task_id = yield manager_task
+        logging.info(f"Created manager task with ID {manager_task_id}")
+
+        completed_count = 0
+        total_tasks = len(self.tasks) - 1
+        logging.info(f"Need to complete {total_tasks} tasks (excluding manager task)")
+
+        while completed_count < total_tasks:
+            tasks_summary = []
+            for tid, tk in self.tasks.items():
+                if tk.name == "manager_task":
+                    continue
+                task_info = {
+                    "task_id": tid,
+                    "name": tk.name,
+                    "description": tk.description,
+                    "status": tk.status if tk.status else "not started",
+                    "agent": tk.agent.name if tk.agent else "No agent"
+                }
+                tasks_summary.append(task_info)
+                logging.info(f"Task {tid} status: {task_info}")
+
+            manager_prompt = f"""
+Here is the current status of all tasks except yours (manager_task):
+{tasks_summary}
+
+Provide a JSON with the structure:
+{{
+  "task_id": <int>,
+  "agent_name": "<string>",
+  "action": "<execute or stop>"
+}}
+"""
+
+            try:
+                logging.info("Requesting manager instructions...")
+                manager_response = client.beta.chat.completions.parse(
+                    model=self.manager_llm,
+                    messages=[
+                        {"role": "system", "content": manager_task.description},
+                        {"role": "user", "content": manager_prompt}
+                    ],
+                    temperature=0.7,
+                    response_format=ManagerInstructions
+                )
+                parsed_instructions = manager_response.choices[0].message.parsed
+                logging.info(f"Manager instructions: {parsed_instructions}")
+            except Exception as e:
+                display_error(f"Manager parse error: {e}")
+                logging.error(f"Manager parse error: {str(e)}", exc_info=True)
+                break
+
+            selected_task_id = parsed_instructions.task_id
+            selected_agent_name = parsed_instructions.agent_name
+            action = parsed_instructions.action
+
+            logging.info(f"Manager selected task_id={selected_task_id}, agent={selected_agent_name}, action={action}")
+
+            if action.lower() == "stop":
+                logging.info("Manager decided to stop task execution")
+                break
+
+            if selected_task_id not in self.tasks:
+                error_msg = f"Manager selected invalid task id {selected_task_id}"
+                display_error(error_msg)
+                logging.error(error_msg)
+                break
+
+            original_agent = self.tasks[selected_task_id].agent.name if self.tasks[selected_task_id].agent else "None"
+            for a in self.agents:
+                if a.name == selected_agent_name:
+                    self.tasks[selected_task_id].agent = a
+                    logging.info(f"Changed agent for task {selected_task_id} from {original_agent} to {selected_agent_name}")
+                    break
+
+            if self.tasks[selected_task_id].status != "completed":
+                logging.info(f"Starting execution of task {selected_task_id}")
+                yield selected_task_id
+                logging.info(f"Finished execution of task {selected_task_id}, status: {self.tasks[selected_task_id].status}")
+
+            if self.tasks[selected_task_id].status == "completed":
+                completed_count += 1
+                logging.info(f"Task {selected_task_id} completed. Total completed: {completed_count}/{total_tasks}")
+
+        self.tasks[manager_task.id].status = "completed"
+        if self.verbose >= 1:
+            logging.info("All tasks completed under manager supervision.")
+        logging.info("Hierarchical task execution finished")
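Process.workflow() walks tasks via is_start and next_tasks, injects previous results into the task description, and for task_type="loop" appends "more" or "done" to the result so the condition map can route back to the same task. A hedged sketch of wiring such a workflow under those assumptions; package-root exports, constructor keywords, agent names, and the model name are illustrative:

from praisonaiagents import Agent, Task, PraisonAIAgents  # assumed package-root exports

collector = Agent(name="Collector", role="Data collector",
                  goal="List items to process", backstory="Keeps lists tidy",
                  llm="gpt-4o-mini", self_reflect=False)  # illustrative model
worker = Agent(name="Worker", role="Item processor",
               goal="Process one item at a time", backstory="Handles items one by one",
               llm="gpt-4o-mini", self_reflect=False)

collect = Task(name="collect", description="Produce a list of three fruits",
               expected_output="A list of fruits", agent=collector,
               is_start=True, next_tasks=["process_item"])

process_item = Task(name="process_item",
                    description="Describe the current fruit",
                    expected_output="One sentence per fruit", agent=worker,
                    task_type="loop",
                    # workflow() appends "more"/"done" to the raw result and matches
                    # these keys against it to pick the next task (back to itself here).
                    condition={"more": ["process_item"], "done": []})

flow = PraisonAIAgents(agents=[collector, worker], tasks=[collect, process_item],
                       process="workflow", manager_llm="gpt-4o-mini")
flow.run_all_tasks()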
praisonaiagents/task/task.py
CHANGED

@@ -1,8 +1,9 @@
 import logging
-from typing import List, Optional, Dict, Any, Type
+from typing import List, Optional, Dict, Any, Type, Callable, Union
 from pydantic import BaseModel
 from ..main import TaskOutput
 from ..agent.agent import Agent
+import uuid

 class Task:
     def __init__(

@@ -23,11 +24,17 @@ class Task:
         result: Optional[TaskOutput] = None,
         create_directory: Optional[bool] = False,
         id: Optional[int] = None,
-        images: Optional[List[str]] = None
+        images: Optional[List[str]] = None,
+        next_tasks: Optional[List[str]] = None,
+        task_type: str = "task",
+        condition: Optional[Dict[str, List[str]]] = None,
+        is_start: bool = False,
+        loop_state: Optional[Dict[str, Union[str, int]]] = None
     ):
+        self.id = str(uuid.uuid4()) if id is None else str(id)
+        self.name = name
         self.description = description
         self.expected_output = expected_output
-        self.name = name
         self.agent = agent
         self.tools = tools if tools else []
         self.context = context if context else []

@@ -40,11 +47,18 @@ class Task:
         self.status = status
         self.result = result
         self.create_directory = create_directory
-        self.id = id
         self.images = images if images else []
+        self.next_tasks = next_tasks if next_tasks else []
+        self.task_type = task_type
+        self.condition = condition if condition else {}
+        self.is_start = is_start
+        self.loop_state = loop_state if loop_state else {}

         if self.output_json and self.output_pydantic:
             raise ValueError("Only one output type can be defined")

+        # Track previous tasks based on next_tasks relationships
+        self.previous_tasks = []
+
     def __str__(self):
         return f"Task(name='{self.name if self.name else 'None'}', description='{self.description}', agent='{self.agent.name if self.agent else 'None'}', status='{self.status}')"
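Task now generates a uuid4 string id when none is given and carries the workflow fields consumed by Process. A small sketch of the new constructor arguments, assuming Task is exported at the package root; all values are illustrative:

from praisonaiagents import Task  # assumed package-root export

review = Task(
    name="review",
    description="Review the draft",
    expected_output="Approve or reject with a reason",
    task_type="decision",                               # "task", "decision" or "loop"
    next_tasks=["publish"],                             # default next step in a workflow
    condition={"approved": ["publish"], "rejected": ["revise"]},  # result-based routing
    is_start=False
)

print(review.id)              # a uuid4 string, since no explicit id was passed
print(review.previous_tasks)  # [] until Process.workflow() links upstream tasks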
{praisonaiagents-0.0.13.dist-info → praisonaiagents-0.0.15.dist-info}/RECORD
CHANGED

@@ -1,9 +1,9 @@
 praisonaiagents/__init__.py,sha256=gI8vEabBTRPsE_E8GA5sBMi4sTtJI-YokPrH2Nor-k0,741
 praisonaiagents/main.py,sha256=K2OxVKPmo4dNJbSWIsXDi_hm9CRx5O4km_74UGcszhk,5744
 praisonaiagents/agent/__init__.py,sha256=sKO8wGEXvtCrvV1e834r1Okv0XAqAxqZCqz6hKLiTvA,79
-praisonaiagents/agent/agent.py,sha256=
+praisonaiagents/agent/agent.py,sha256=zTYcDpJ5DzzBnefwLvhrtBlGQoRI4ZZAioDu5nKTPSs,24042
 praisonaiagents/agents/__init__.py,sha256=7RDeQNSqZg5uBjD4M_0p_F6YgfWuDuxPFydPU50kDYc,120
-praisonaiagents/agents/agents.py,sha256=
+praisonaiagents/agents/agents.py,sha256=ngPFNTmv3whf22litkQacUGaRghX-NbLPAC6Ejy9qoU,13003
 praisonaiagents/build/lib/praisonaiagents/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
 praisonaiagents/build/lib/praisonaiagents/main.py,sha256=zDhN5KKtKbfruolDNxlyJkcFlkSt4KQkQTDRfQVAhxc,3960
 praisonaiagents/build/lib/praisonaiagents/agent/__init__.py,sha256=sKO8wGEXvtCrvV1e834r1Okv0XAqAxqZCqz6hKLiTvA,79

@@ -12,9 +12,11 @@ praisonaiagents/build/lib/praisonaiagents/agents/__init__.py,sha256=cgCLFLFcLp9S
 praisonaiagents/build/lib/praisonaiagents/agents/agents.py,sha256=P2FAtlfD3kPib5a1oLVYanxlU6e4-GhBMQ0YDY5MHY4,13473
 praisonaiagents/build/lib/praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
 praisonaiagents/build/lib/praisonaiagents/task/task.py,sha256=4Y1qX8OeEFcid2yhAiPYylvHpuDmWORsyNL16_BiVvI,1831
+praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
+praisonaiagents/process/process.py,sha256=BgtFgTQjLoqHzj97zDtALjuP_ciOErMDB9quDTlZFjg,11676
 praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
-praisonaiagents/task/task.py,sha256
-praisonaiagents-0.0.
-praisonaiagents-0.0.
-praisonaiagents-0.0.
-praisonaiagents-0.0.
+praisonaiagents/task/task.py,sha256=oMC5Zz1dMj0Ceice69aBS1KQQXMLqphc8wNOQ9zcu0Q,2570
+praisonaiagents-0.0.15.dist-info/METADATA,sha256=QERXlBCD9drRfFrI5GwbLzbIaLX_FnOM9UVxFwIwDBo,233
+praisonaiagents-0.0.15.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+praisonaiagents-0.0.15.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.15.dist-info/RECORD,,
{praisonaiagents-0.0.13.dist-info → praisonaiagents-0.0.15.dist-info}/WHEEL
File without changes

{praisonaiagents-0.0.13.dist-info → praisonaiagents-0.0.15.dist-info}/top_level.txt
File without changes