praisonaiagents 0.0.58__py3-none-any.whl → 0.0.59__py3-none-any.whl
- praisonaiagents/process/process.py +389 -124
- praisonaiagents/task/task.py +18 -16
- {praisonaiagents-0.0.58.dist-info → praisonaiagents-0.0.59.dist-info}/METADATA +1 -1
- {praisonaiagents-0.0.58.dist-info → praisonaiagents-0.0.59.dist-info}/RECORD +6 -6
- {praisonaiagents-0.0.58.dist-info → praisonaiagents-0.0.59.dist-info}/WHEEL +0 -0
- {praisonaiagents-0.0.58.dist-info → praisonaiagents-0.0.59.dist-info}/top_level.txt +0 -0
praisonaiagents/process/process.py CHANGED

@@ -12,6 +12,8 @@ class LoopItems(BaseModel):
 items: List[Any]

 class Process:
+ DEFAULT_RETRY_LIMIT = 3 # Predefined retry limit in a common place
+
 def __init__(self, tasks: Dict[str, Task], agents: List[Agent], manager_llm: Optional[str] = None, verbose: bool = False, max_iter: int = 10):
 logging.debug(f"=== Initializing Process ===")
 logging.debug(f"Number of tasks: {len(tasks)}")

@@ -19,12 +21,52 @@ class Process:
 logging.debug(f"Manager LLM: {manager_llm}")
 logging.debug(f"Verbose mode: {verbose}")
 logging.debug(f"Max iterations: {max_iter}")
-
+
 self.tasks = tasks
 self.agents = agents
 self.manager_llm = manager_llm
 self.verbose = verbose
 self.max_iter = max_iter
+ self.task_retry_counter: Dict[str, int] = {} # Initialize retry counter
+ self.workflow_finished = False # ADDED: Workflow finished flag
+
+ def _find_next_not_started_task(self) -> Optional[Task]:
+ """Fallback mechanism to find the next 'not started' task."""
+ fallback_attempts = 0
+ temp_current_task = None
+
+ # Clear previous task context before finding next task
+ for task in self.tasks.values():
+ if hasattr(task, 'description') and 'Input data from previous tasks:' in task.description:
+ task.description = task.description.split('Input data from previous tasks:')[0].strip()
+
+ while fallback_attempts < Process.DEFAULT_RETRY_LIMIT and not temp_current_task:
+ fallback_attempts += 1
+ logging.debug(f"Fallback attempt {fallback_attempts}: Trying to find next 'not started' task.")
+ for task_candidate in self.tasks.values():
+ if task_candidate.status == "not started":
+ # Check if there's a condition path to this task
+ current_conditions = task_candidate.condition or {}
+ leads_to_task = any(
+ task_value for task_value in current_conditions.values()
+ if isinstance(task_value, (list, str)) and task_value
+ )
+
+ if not leads_to_task and not task_candidate.next_tasks:
+ continue # Skip if no valid path exists
+
+ if self.task_retry_counter.get(task_candidate.id, 0) < Process.DEFAULT_RETRY_LIMIT:
+ self.task_retry_counter[task_candidate.id] = self.task_retry_counter.get(task_candidate.id, 0) + 1
+ temp_current_task = task_candidate
+ logging.debug(f"Fallback attempt {fallback_attempts}: Found 'not started' task: {temp_current_task.name}, retry count: {self.task_retry_counter[temp_current_task.id]}")
+ return temp_current_task # Return the found task immediately
+ else:
+ logging.debug(f"Max retries reached for task {task_candidate.name} in fallback mode, marking as failed.")
+ task_candidate.status = "failed"
+ if not temp_current_task:
+ logging.debug(f"Fallback attempt {fallback_attempts}: No 'not started' task found within retry limit.")
+ return None # Return None if no task found after all attempts
+

 async def aworkflow(self) -> AsyncGenerator[str, None]:
 """Async version of workflow method"""
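The new `_find_next_not_started_task` helper above caps how many times a stalled task can be re-selected before it is marked failed. Below is a minimal, self-contained sketch of that retry-capping pattern, for illustration only; it is not the library code, and `SimpleTask` and the task ids are hypothetical:

from typing import Dict, List, Optional

RETRY_LIMIT = 3  # mirrors Process.DEFAULT_RETRY_LIMIT in the diff above

class SimpleTask:
    """Hypothetical stand-in for a task; only what the sketch needs."""
    def __init__(self, task_id: str, status: str = "not started"):
        self.id = task_id
        self.status = status

def find_next_not_started(tasks: List[SimpleTask], retry_counter: Dict[str, int]) -> Optional[SimpleTask]:
    # Return the first "not started" task whose retry budget is not exhausted;
    # a task that has already been picked RETRY_LIMIT times is marked failed instead.
    for task in tasks:
        if task.status != "not started":
            continue
        if retry_counter.get(task.id, 0) < RETRY_LIMIT:
            retry_counter[task.id] = retry_counter.get(task.id, 0) + 1
            return task
        task.status = "failed"
    return None

tasks = [SimpleTask("t1")]
counter: Dict[str, int] = {}
picks = [find_next_not_started(tasks, counter) for _ in range(4)]
print([p.id if p else None for p in picks])  # ['t1', 't1', 't1', None]
print(tasks[0].status)                       # 'failed'

Capping the counter per task id is what keeps the fallback from re-yielding the same stuck task indefinitely.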
@@ -48,23 +90,50 @@ class Process:
 start_task = task
 logging.debug(f"Found marked start task: {task.name} (id: {task_id})")
 break
-
+
 if not start_task:
 start_task = list(self.tasks.values())[0]
 logging.debug(f"No start task marked, using first task: {start_task.name}")
-
+
 current_task = start_task
 visited_tasks = set()
 loop_data = {} # Store loop-specific data

 # TODO: start task with loop feature is not available in aworkflow method
-
+
 while current_task:
 current_iter += 1
 if current_iter > self.max_iter:
 logging.info(f"Max iteration limit {self.max_iter} reached, ending workflow.")
 break

+ # ADDED: Check workflow finished flag at the start of each cycle
+ if self.workflow_finished:
+ logging.info("Workflow finished early as all tasks are completed.")
+ break
+
+ # Add task summary at start of each cycle
+ logging.debug(f"""
+ === Workflow Cycle {current_iter} Summary ===
+ Total tasks: {len(self.tasks)}
+ Outstanding tasks: {sum(1 for t in self.tasks.values() if t.status != "completed")}
+ Completed tasks: {sum(1 for t in self.tasks.values() if t.status == "completed")}
+ Tasks by status:
+ - Not started: {sum(1 for t in self.tasks.values() if t.status == "not started")}
+ - In progress: {sum(1 for t in self.tasks.values() if t.status == "in_progress")}
+ - Completed: {sum(1 for t in self.tasks.values() if t.status == "completed")}
+ Tasks by type:
+ - Loop tasks: {sum(1 for t in self.tasks.values() if t.task_type == "loop")}
+ - Decision tasks: {sum(1 for t in self.tasks.values() if t.task_type == "decision")}
+ - Regular tasks: {sum(1 for t in self.tasks.values() if t.task_type not in ["loop", "decision"])}
+ """)
+
+ # ADDED: Check if all tasks are completed and set workflow_finished flag
+ if all(task.status == "completed" for task in self.tasks.values()):
+ logging.info("All tasks are completed.")
+ self.workflow_finished = True
+ # The next iteration loop check will break the workflow
+
 task_id = current_task.id
 logging.debug(f"""
 === Task Execution Details ===

@@ -76,11 +145,11 @@ Next tasks: {current_task.next_tasks}
 Context tasks: {[t.name for t in current_task.context] if current_task.context else []}
 Description length: {len(current_task.description)}
 """)
-
+
 # Add context from previous tasks to description
 if current_task.previous_tasks or current_task.context:
 context = "\nInput data from previous tasks:"
-
+
 # Add data from previous tasks in workflow
 for prev_name in current_task.previous_tasks:
 prev_task = next((t for t in self.tasks.values() if t.name == prev_name), None)

@@ -90,16 +159,16 @@ Description length: {len(current_task.description)}
 context += f"\n{prev_name}: {prev_task.result.raw}"
 else:
 context += f"\n{prev_name}: {prev_task.result.raw}"
-
+
 # Add data from context tasks
 if current_task.context:
 for ctx_task in current_task.context:
 if ctx_task.result and ctx_task.name != current_task.name:
 context += f"\n{ctx_task.name}: {ctx_task.result.raw}"
-
+
 # Update task description with context
 current_task.description = current_task.description + context
-
+
 # Skip execution for loop tasks, only process their subtasks
 if current_task.task_type == "loop":
 logging.debug(f"""

@@ -112,7 +181,7 @@ Condition: {current_task.condition}
 Subtasks created: {getattr(current_task, '_subtasks_created', False)}
 Input file: {getattr(current_task, 'input_file', None)}
 """)
-
+
 # Check if subtasks are created and completed
 if getattr(current_task, "_subtasks_created", False):
 subtasks = [

@@ -125,7 +194,7 @@ Total subtasks: {len(subtasks)}
 Completed: {sum(1 for st in subtasks if st.status == "completed")}
 Pending: {sum(1 for st in subtasks if st.status != "completed")}
 """)
-
+
 # Log detailed subtask info
 for st in subtasks:
 logging.debug(f"""

@@ -134,27 +203,53 @@ Subtask: {st.name}
 - Next tasks: {st.next_tasks}
 - Condition: {st.condition}
 """)
-
+
 if subtasks and all(st.status == "completed" for st in subtasks):
 logging.debug(f"=== All {len(subtasks)} subtasks completed for {current_task.name} ===")
-
+
 # Mark loop task completed and move to next task
 current_task.status = "completed"
 logging.debug(f"Loop {current_task.name} marked as completed")
+
+ # Set result for loop task when all subtasks complete
+ if not current_task.result:
+ # Get result from last completed subtask
+ last_subtask = next((t for t in reversed(subtasks) if t.status == "completed"), None)
+ if last_subtask and last_subtask.result:
+ current_task.result = last_subtask.result

- #
- if current_task.
-
-
-
+ # Route to next task based on condition
+ if current_task.condition:
+ # Get decision from result if available
+ decision_str = None
+ if current_task.result:
+ if current_task.result.pydantic and hasattr(current_task.result.pydantic, "decision"):
+ decision_str = current_task.result.pydantic.decision.lower()
+ elif current_task.result.raw:
+ decision_str = current_task.result.raw.lower()
+
+ # For loop tasks, use "done" to follow condition path
+ if current_task.task_type == "loop" and all(t.status == "completed" for t in subtasks):
+ decision_str = "done"
+
+ target_tasks = current_task.condition.get(decision_str, []) if decision_str else []
+ task_value = target_tasks[0] if isinstance(target_tasks, list) else target_tasks
+ next_task = next((t for t in self.tasks.values() if t.name == task_value), None)
 if next_task:
-
- logging.debug(f"
-
-
-
-
-
+ next_task.status = "not started" # Reset status to allow execution
+ logging.debug(f"Routing to {next_task.name} based on decision: {decision_str}")
+ self.workflow_finished = False
+ current_task = next_task
+ # Ensure the task is yielded for execution
+ if current_task.id not in visited_tasks:
+ yield current_task.id
+ visited_tasks.add(current_task.id)
+ else:
+ # End workflow if no valid next task found
+ logging.info(f"No valid next task found for decision: {decision_str}")
+ self.workflow_finished = True
+ current_task = None
+ break
 else:
 logging.debug(f"No subtasks created yet for {current_task.name}")
 # Create subtasks if needed
@@ -179,25 +274,38 @@ Subtask: {st.name}
 logging.debug(f"Task next_tasks: {current_task.next_tasks}")
 yield task_id
 visited_tasks.add(task_id)
-
+
+ # Only end workflow if no next_tasks AND no conditions
+ if not current_task.next_tasks and not current_task.condition and not any(
+ t.task_type == "loop" and current_task.name.startswith(t.name + "_")
+ for t in self.tasks.values()
+ ):
+ logging.info(f"Task {current_task.name} has no next tasks, ending workflow")
+ self.workflow_finished = True
+ current_task = None
+ break
+
 # Reset completed task to "not started" so it can run again
 if self.tasks[task_id].status == "completed":
- # Never reset loop tasks, decision tasks, or their subtasks
+ # Never reset loop tasks, decision tasks, or their subtasks if rerun is False
 subtask_name = self.tasks[task_id].name
+ task_to_check = self.tasks[task_id]
 logging.debug(f"=== Checking reset for completed task: {subtask_name} ===")
- logging.debug(f"Task type: {
- logging.debug(f"Task status before reset check: {
-
-
-
+ logging.debug(f"Task type: {task_to_check.task_type}")
+ logging.debug(f"Task status before reset check: {task_to_check.status}")
+ logging.debug(f"Task rerun: {getattr(task_to_check, 'rerun', True)}") # default to True if not set
+
+ if (getattr(task_to_check, 'rerun', True) and # Corrected condition - reset only if rerun is True (or default True)
+ task_to_check.task_type != "loop" and # Removed "decision" from exclusion
+ not any(t.task_type == "loop" and subtask_name.startswith(t.name + "_")
 for t in self.tasks.values())):
 logging.debug(f"=== Resetting non-loop, non-decision task {subtask_name} to 'not started' ===")
 self.tasks[task_id].status = "not started"
 logging.debug(f"Task status after reset: {self.tasks[task_id].status}")
 else:
- logging.debug(f"=== Skipping reset for loop/decision/subtask: {subtask_name} ===")
+ logging.debug(f"=== Skipping reset for loop/decision/subtask or rerun=False: {subtask_name} ===")
 logging.debug(f"Keeping status as: {self.tasks[task_id].status}")
-
+
 # Handle loop progression
 if current_task.task_type == "loop":
 loop_key = f"loop_{current_task.name}"

@@ -205,7 +313,7 @@ Subtask: {st.name}
 loop_info = loop_data[loop_key]
 loop_info["index"] += 1
 has_more = loop_info["remaining"] > 0
-
+
 # Update result to trigger correct condition
 if current_task.result:
 result = current_task.result.raw

@@ -214,38 +322,77 @@ Subtask: {st.name}
 else:
 result += "\ndone"
 current_task.result.raw = result
-
+
 # Determine next task based on result
 next_task = None
 if current_task and current_task.result:
 if current_task.task_type in ["decision", "loop"]:
- #
+ # Get decision from pydantic or raw response
 decision_str = current_task.result.raw.lower()
 if current_task.result.pydantic and hasattr(current_task.result.pydantic, "decision"):
 decision_str = current_task.result.pydantic.decision.lower()

- # Check conditions
-
-
-
-
-
-
-
-
- next_task_name = task_value
- next_task = next((t for t in self.tasks.values() if t.name == next_task_name), None)
- # For loops, allow revisiting the same task
- if next_task and next_task.id == current_task.id:
- visited_tasks.discard(current_task.id)
+ # Check if task has conditions and next_tasks
+ if current_task.condition:
+ # Get target task based on decision
+ target_tasks = current_task.condition.get(decision_str, [])
+ # Handle all forms of exit conditions
+ if not target_tasks or target_tasks == "exit" or (isinstance(target_tasks, list) and (not target_tasks or target_tasks[0] == "exit")):
+ logging.info(f"Workflow exit condition met on decision: {decision_str}")
+ self.workflow_finished = True
+ current_task = None
 break
-
+ else:
+ # Find the target task by name
+ task_value = target_tasks[0] if isinstance(target_tasks, list) else target_tasks
+ next_task = next((t for t in self.tasks.values() if t.name == task_value), None)
+ if next_task:
+ next_task.status = "not started" # Reset status to allow execution
+ logging.debug(f"Routing to {next_task.name} based on decision: {decision_str}")
+ # Don't mark workflow as finished when following condition path
+ self.workflow_finished = False
+
+ # If no condition-based routing, use next_tasks
 if not next_task and current_task and current_task.next_tasks:
 next_task_name = current_task.next_tasks[0]
 next_task = next((t for t in self.tasks.values() if t.name == next_task_name), None)
-
+ if next_task:
+ # Reset the next task to allow re-execution
+ next_task.status = "not started"
+ # Don't mark workflow as finished if we're in a task loop
+ if (next_task.previous_tasks and current_task.name in next_task.previous_tasks and
+ next_task.next_tasks and
+ next_task.next_tasks[0] in self.tasks and
+ next_task.name in self.tasks[next_task.next_tasks[0]].previous_tasks):
+ self.workflow_finished = False
+ logging.debug(f"Following next_tasks to {next_task.name}")
+
 current_task = next_task
 if not current_task:
+ current_task = self._find_next_not_started_task() # General fallback if no next task in workflow
+
+
+ if not current_task:
+ # Add final workflow summary
+ logging.debug(f"""
+ === Final Workflow Summary ===
+ Total tasks processed: {len(self.tasks)}
+ Final status:
+ - Completed tasks: {sum(1 for t in self.tasks.values() if t.status == "completed")}
+ - Outstanding tasks: {sum(1 for t in self.tasks.values() if t.status != "completed")}
+ Tasks by status:
+ - Not started: {sum(1 for t in self.tasks.values() if t.status == "not started")}
+ - In progress: {sum(1 for t in self.tasks.values() if t.status == "in_progress")}
+ - Completed: {sum(1 for t in self.tasks.values() if t.status == "completed")}
+ - Failed: {sum(1 for t in self.tasks.values() if t.status == "failed")}
+ Tasks by type:
+ - Loop tasks: {sum(1 for t in self.tasks.values() if t.task_type == "loop")}
+ - Decision tasks: {sum(1 for t in self.tasks.values() if t.task_type == "decision")}
+ - Regular tasks: {sum(1 for t in self.tasks.values() if t.task_type not in ["loop", "decision"])}
+ Total iterations: {current_iter}
+ Workflow Finished: {self.workflow_finished} # ADDED: Workflow Finished Status
+ """)
+
 logging.info("Workflow execution completed")
 break
@@ -256,6 +403,7 @@ Task: {current_task.name}
 Final status: {current_task.status}
 Next task: {next_task.name if next_task else None}
 Iteration: {current_iter}/{self.max_iter}
+ Workflow Finished: {self.workflow_finished} # ADDED: Workflow Finished Status
 """)

 async def asequential(self) -> AsyncGenerator[str, None]:

@@ -387,7 +535,7 @@ Provide a JSON with the structure:
 self.tasks[manager_task.id].status = "completed"
 if self.verbose >= 1:
 logging.info("All tasks completed under manager supervision.")
- logging.info("Hierarchical task execution finished")
+ logging.info("Hierarchical task execution finished")

 def workflow(self):
 """Synchronous version of workflow method"""

@@ -406,12 +554,12 @@ Provide a JSON with the structure:
 if task.is_start:
 start_task = task
 break
-
+
 if not start_task:
 start_task = list(self.tasks.values())[0]
 logging.info("No start task marked, using first task")

- # If loop type and no input_file, default to tasks.csv
+ # If loop type and no input_file, default to tasks.csv
 if start_task and start_task.task_type == "loop" and not start_task.input_file:
 start_task.input_file = "tasks.csv"


@@ -420,17 +568,17 @@ Provide a JSON with the structure:
 try:
 file_ext = os.path.splitext(start_task.input_file)[1].lower()
 new_tasks = []
-
+
 if file_ext == ".csv":
 with open(start_task.input_file, "r", encoding="utf-8") as f:
 reader = csv.reader(f, quotechar='"', escapechar='\\') # Handle quoted/escaped fields
 previous_task = None
 task_count = 0
-
+
 for i, row in enumerate(reader):
 if not row: # Skip truly empty rows
 continue
-
+
 # Properly handle Q&A pairs with potential commas
 task_desc = row[0].strip() if row else ""
 if len(row) > 1:

@@ -438,16 +586,16 @@ Provide a JSON with the structure:
 question = row[0].strip()
 answer = ",".join(field.strip() for field in row[1:])
 task_desc = f"Question: {question}\nAnswer: {answer}"
-
+
 if not task_desc: # Skip rows with empty content
 continue
-
+
 task_count += 1
 logging.debug(f"Processing CSV row {i+1}: {task_desc}")
-
+
 # Inherit next_tasks from parent loop task
 inherited_next_tasks = start_task.next_tasks if start_task.next_tasks else []
-
+
 row_task = Task(
 description=f"{start_task.description}\n{task_desc}" if start_task.description else task_desc,
 agent=start_task.agent,

@@ -464,16 +612,16 @@ Provide a JSON with the structure:
 )
 self.tasks[row_task.id] = row_task
 new_tasks.append(row_task)
-
+
 if previous_task:
 previous_task.next_tasks = [row_task.name]
 previous_task.condition["done"] = [row_task.name] # Use "done" consistently
 previous_task = row_task
-
+
 # For the last task in the loop, ensure it points to parent's next tasks
 if task_count > 0 and not row_task.next_tasks:
 row_task.next_tasks = inherited_next_tasks
-
+
 logging.info(f"Processed {task_count} rows from CSV file")
 else:
 # If not CSV, read lines

@@ -495,7 +643,7 @@ Provide a JSON with the structure:
 )
 self.tasks[row_task.id] = row_task
 new_tasks.append(row_task)
-
+
 if previous_task:
 previous_task.next_tasks = [row_task.name]
 previous_task.condition["complete"] = [row_task.name]
@@ -511,26 +659,54 @@ Provide a JSON with the structure:
 current_task = start_task
 visited_tasks = set()
 loop_data = {} # Store loop-specific data
-
+
 while current_task:
 current_iter += 1
 if current_iter > self.max_iter:
 logging.info(f"Max iteration limit {self.max_iter} reached, ending workflow.")
 break

+ # ADDED: Check workflow finished flag at the start of each cycle
+ if self.workflow_finished:
+ logging.info("Workflow finished early as all tasks are completed.")
+ break
+
+ # Add task summary at start of each cycle
+ logging.debug(f"""
+ === Workflow Cycle {current_iter} Summary ===
+ Total tasks: {len(self.tasks)}
+ Outstanding tasks: {sum(1 for t in self.tasks.values() if t.status != "completed")}
+ Completed tasks: {sum(1 for t in self.tasks.values() if t.status == "completed")}
+ Tasks by status:
+ - Not started: {sum(1 for t in self.tasks.values() if t.status == "not started")}
+ - In progress: {sum(1 for t in self.tasks.values() if t.status == "in_progress")}
+ - Completed: {sum(1 for t in self.tasks.values() if t.status == "completed")}
+ Tasks by type:
+ - Loop tasks: {sum(1 for t in self.tasks.values() if t.task_type == "loop")}
+ - Decision tasks: {sum(1 for t in self.tasks.values() if t.task_type == "decision")}
+ - Regular tasks: {sum(1 for t in self.tasks.values() if t.task_type not in ["loop", "decision"])}
+ """)
+
+ # ADDED: Check if all tasks are completed and set workflow_finished flag
+ if all(task.status == "completed" for task in self.tasks.values()):
+ logging.info("All tasks are completed.")
+ self.workflow_finished = True
+ # The next iteration loop check will break the workflow
+
+
 # Handle loop task file reading at runtime
- if (current_task.task_type == "loop" and
- current_task is not start_task and
+ if (current_task.task_type == "loop" and
+ current_task is not start_task and
 getattr(current_task, "_subtasks_created", False) is not True):
-
+
 if not current_task.input_file:
 current_task.input_file = "tasks.csv"
-
+
 if getattr(current_task, "input_file", None):
 try:
 file_ext = os.path.splitext(current_task.input_file)[1].lower()
 new_tasks = []
-
+
 if file_ext == ".csv":
 with open(current_task.input_file, "r", encoding="utf-8") as f:
 reader = csv.reader(f)

@@ -552,7 +728,7 @@ Provide a JSON with the structure:
 )
 self.tasks[row_task.id] = row_task
 new_tasks.append(row_task)
-
+
 if previous_task:
 previous_task.next_tasks = [row_task.name]
 previous_task.condition["complete"] = [row_task.name]

@@ -576,7 +752,7 @@ Provide a JSON with the structure:
 )
 self.tasks[row_task.id] = row_task
 new_tasks.append(row_task)
-
+
 if previous_task:
 previous_task.next_tasks = [row_task.name]
 previous_task.condition["complete"] = [row_task.name]

@@ -600,11 +776,11 @@ Next tasks: {current_task.next_tasks}
 Context tasks: {[t.name for t in current_task.context] if current_task.context else []}
 Description length: {len(current_task.description)}
 """)
-
+
 # Add context from previous tasks to description
 if current_task.previous_tasks or current_task.context:
 context = "\nInput data from previous tasks:"
-
+
 # Add data from previous tasks in workflow
 for prev_name in current_task.previous_tasks:
 prev_task = next((t for t in self.tasks.values() if t.name == prev_name), None)

@@ -614,16 +790,16 @@ Description length: {len(current_task.description)}
 context += f"\n{prev_name}: {prev_task.result.raw}"
 else:
 context += f"\n{prev_name}: {prev_task.result.raw}"
-
+
 # Add data from context tasks
 if current_task.context:
 for ctx_task in current_task.context:
 if ctx_task.result and ctx_task.name != current_task.name:
 context += f"\n{ctx_task.name}: {ctx_task.result.raw}"
-
+
 # Update task description with context
 current_task.description = current_task.description + context
-
+
 # Skip execution for loop tasks, only process their subtasks
 if current_task.task_type == "loop":
 logging.debug(f"""

@@ -636,14 +812,14 @@ Condition: {current_task.condition}
 Subtasks created: {getattr(current_task, '_subtasks_created', False)}
 Input file: {getattr(current_task, 'input_file', None)}
 """)
-
+
 # Check if subtasks are created and completed
 if getattr(current_task, "_subtasks_created", False):
 subtasks = [
 t for t in self.tasks.values()
 if t.name.startswith(current_task.name + "_")
 ]
-
+
 logging.debug(f"""
 === Subtask Status Check ===
 Total subtasks: {len(subtasks)}

@@ -661,24 +837,50 @@ Subtask: {st.name}

 if subtasks and all(st.status == "completed" for st in subtasks):
 logging.debug(f"=== All {len(subtasks)} subtasks completed for {current_task.name} ===")
-
+
 # Mark loop task completed and move to next task
 current_task.status = "completed"
 logging.debug(f"Loop {current_task.name} marked as completed")
+
+ # Set result for loop task when all subtasks complete
+ if not current_task.result:
+ # Get result from last completed subtask
+ last_subtask = next((t for t in reversed(subtasks) if t.status == "completed"), None)
+ if last_subtask and last_subtask.result:
+ current_task.result = last_subtask.result

- #
- if current_task.
-
-
-
+ # Route to next task based on condition
+ if current_task.condition:
+ # Get decision from result if available
+ decision_str = None
+ if current_task.result:
+ if current_task.result.pydantic and hasattr(current_task.result.pydantic, "decision"):
+ decision_str = current_task.result.pydantic.decision.lower()
+ elif current_task.result.raw:
+ decision_str = current_task.result.raw.lower()
+
+ # For loop tasks, use "done" to follow condition path
+ if current_task.task_type == "loop" and all(t.status == "completed" for t in subtasks):
+ decision_str = "done"
+
+ target_tasks = current_task.condition.get(decision_str, []) if decision_str else []
+ task_value = target_tasks[0] if isinstance(target_tasks, list) else target_tasks
+ next_task = next((t for t in self.tasks.values() if t.name == task_value), None)
 if next_task:
-
- logging.debug(f"
-
-
-
-
-
+ next_task.status = "not started" # Reset status to allow execution
+ logging.debug(f"Routing to {next_task.name} based on decision: {decision_str}")
+ self.workflow_finished = False
+ current_task = next_task
+ # Ensure the task is yielded for execution
+ if current_task.id not in visited_tasks:
+ yield current_task.id
+ visited_tasks.add(current_task.id)
+ else:
+ # End workflow if no valid next task found
+ logging.info(f"No valid next task found for decision: {decision_str}")
+ self.workflow_finished = True
+ current_task = None
+ break
 else:
 logging.debug(f"No subtasks created yet for {current_task.name}")
 # Create subtasks if needed
@@ -703,25 +905,39 @@ Subtask: {st.name}
 logging.debug(f"Task next_tasks: {current_task.next_tasks}")
 yield task_id
 visited_tasks.add(task_id)
-
+
+ # Only end workflow if no next_tasks AND no conditions
+ if not current_task.next_tasks and not current_task.condition and not any(
+ t.task_type == "loop" and current_task.name.startswith(t.name + "_")
+ for t in self.tasks.values()
+ ):
+ logging.info(f"Task {current_task.name} has no next tasks, ending workflow")
+ self.workflow_finished = True
+ current_task = None
+ break
+
 # Reset completed task to "not started" so it can run again
 if self.tasks[task_id].status == "completed":
- # Never reset loop tasks, decision tasks, or their subtasks
+ # Never reset loop tasks, decision tasks, or their subtasks if rerun is False
 subtask_name = self.tasks[task_id].name
+ task_to_check = self.tasks[task_id]
 logging.debug(f"=== Checking reset for completed task: {subtask_name} ===")
- logging.debug(f"Task type: {
- logging.debug(f"Task status before reset check: {
-
-
-
+ logging.debug(f"Task type: {task_to_check.task_type}")
+ logging.debug(f"Task status before reset check: {task_to_check.status}")
+ logging.debug(f"Task rerun: {getattr(task_to_check, 'rerun', True)}") # default to True if not set
+
+ if (getattr(task_to_check, 'rerun', True) and # Corrected condition - reset only if rerun is True (or default True)
+ task_to_check.task_type != "loop" and # Removed "decision" from exclusion
+ not any(t.task_type == "loop" and subtask_name.startswith(t.name + "_")
 for t in self.tasks.values())):
 logging.debug(f"=== Resetting non-loop, non-decision task {subtask_name} to 'not started' ===")
 self.tasks[task_id].status = "not started"
 logging.debug(f"Task status after reset: {self.tasks[task_id].status}")
 else:
- logging.debug(f"=== Skipping reset for loop/decision/subtask: {subtask_name} ===")
+ logging.debug(f"=== Skipping reset for loop/decision/subtask or rerun=False: {subtask_name} ===")
 logging.debug(f"Keeping status as: {self.tasks[task_id].status}")
-
+
+
 # Handle loop progression
 if current_task.task_type == "loop":
 loop_key = f"loop_{current_task.name}"

@@ -729,7 +945,7 @@ Subtask: {st.name}
 loop_info = loop_data[loop_key]
 loop_info["index"] += 1
 has_more = loop_info["remaining"] > 0
-
+
 # Update result to trigger correct condition
 if current_task.result:
 result = current_task.result.raw

@@ -738,41 +954,90 @@ Subtask: {st.name}
 else:
 result += "\ndone"
 current_task.result.raw = result
-
+
 # Determine next task based on result
 next_task = None
 if current_task and current_task.result:
 if current_task.task_type in ["decision", "loop"]:
- #
+ # Get decision from pydantic or raw response
 decision_str = current_task.result.raw.lower()
 if current_task.result.pydantic and hasattr(current_task.result.pydantic, "decision"):
 decision_str = current_task.result.pydantic.decision.lower()

- # Check conditions
-
-
-
-
-
-
-
-
- next_task_name = task_value
- next_task = next((t for t in self.tasks.values() if t.name == next_task_name), None)
- # For loops, allow revisiting the same task
- if next_task and next_task.id == current_task.id:
- visited_tasks.discard(current_task.id)
+ # Check if task has conditions and next_tasks
+ if current_task.condition:
+ # Get target task based on decision
+ target_tasks = current_task.condition.get(decision_str, [])
+ # Handle all forms of exit conditions
+ if not target_tasks or target_tasks == "exit" or (isinstance(target_tasks, list) and (not target_tasks or target_tasks[0] == "exit")):
+ logging.info(f"Workflow exit condition met on decision: {decision_str}")
+ self.workflow_finished = True
+ current_task = None
 break
-
+ else:
+ # Find the target task by name
+ task_value = target_tasks[0] if isinstance(target_tasks, list) else target_tasks
+ next_task = next((t for t in self.tasks.values() if t.name == task_value), None)
+ if next_task:
+ next_task.status = "not started" # Reset status to allow execution
+ logging.debug(f"Routing to {next_task.name} based on decision: {decision_str}")
+ # Don't mark workflow as finished when following condition path
+ self.workflow_finished = False
+
+ # If no condition-based routing, use next_tasks
 if not next_task and current_task and current_task.next_tasks:
 next_task_name = current_task.next_tasks[0]
 next_task = next((t for t in self.tasks.values() if t.name == next_task_name), None)
-
+ if next_task:
+ # Reset the next task to allow re-execution
+ next_task.status = "not started"
+ # Don't mark workflow as finished if we're in a task loop
+ if (next_task.previous_tasks and current_task.name in next_task.previous_tasks and
+ next_task.next_tasks and
+ next_task.next_tasks[0] in self.tasks and
+ next_task.name in self.tasks[next_task.next_tasks[0]].previous_tasks):
+ self.workflow_finished = False
+ logging.debug(f"Following next_tasks to {next_task.name}")
+
 current_task = next_task
 if not current_task:
+ current_task = self._find_next_not_started_task() # General fallback if no next task in workflow
+
+
+ if not current_task:
+ # Add final workflow summary
+ logging.debug(f"""
+ === Final Workflow Summary ===
+ Total tasks processed: {len(self.tasks)}
+ Final status:
+ - Completed tasks: {sum(1 for t in self.tasks.values() if t.status == "completed")}
+ - Outstanding tasks: {sum(1 for t in self.tasks.values() if t.status != "completed")}
+ Tasks by status:
+ - Not started: {sum(1 for t in self.tasks.values() if t.status == "not started")}
+ - In progress: {sum(1 for t in self.tasks.values() if t.status == "in_progress")}
+ - Completed: {sum(1 for t in self.tasks.values() if t.status == "completed")}
+ - Failed: {sum(1 for t in self.tasks.values() if t.status == "failed")}
+ Tasks by type:
+ - Loop tasks: {sum(1 for t in self.tasks.values() if t.task_type == "loop")}
+ - Decision tasks: {sum(1 for t in self.tasks.values() if t.task_type == "decision")}
+ - Regular tasks: {sum(1 for t in self.tasks.values() if t.task_type not in ["loop", "decision"])}
+ Total iterations: {current_iter}
+ Workflow Finished: {self.workflow_finished} # ADDED: Workflow Finished Status
+ """)
+
 logging.info("Workflow execution completed")
 break

+ # Add completion logging
+ logging.debug(f"""
+ === Task Completion ===
+ Task: {current_task.name}
+ Final status: {current_task.status}
+ Next task: {next_task.name if next_task else None}
+ Iteration: {current_iter}/{self.max_iter}
+ Workflow Finished: {self.workflow_finished} # ADDED: Workflow Finished Status
+ """)
+
 def sequential(self):
 """Synchronous version of sequential method"""
 for task_id in self.tasks:

@@ -891,4 +1156,4 @@ Provide a JSON with the structure:
 self.tasks[manager_task.id].status = "completed"
 if self.verbose >= 1:
 logging.info("All tasks completed under manager supervision.")
- logging.info("Hierarchical task execution finished")
+ logging.info("Hierarchical task execution finished")
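Taken together, the process.py changes give both `workflow()` and `aworkflow()` an explicit `workflow_finished` flag: it is checked at the top of every cycle, set once all tasks report "completed" (or an "exit" condition fires), and cleared whenever a condition path or `next_tasks` route deliberately re-activates a task. A rough, illustrative sketch of that loop shape follows; the toy `statuses` dict and `run_one` stand in for the real Process state and agent execution and are not part of the package:

# Toy illustration of the early-exit loop shape added in 0.0.59; not the library code.
statuses = {"plan": "completed", "write": "not started", "review": "not started"}
workflow_finished = False
max_iter = 10

def run_one(name: str) -> None:
    # Stand-in for executing a task; the real methods yield task ids to agents instead.
    statuses[name] = "completed"

current = "write"
for iteration in range(1, max_iter + 1):
    # Check the finished flag at the start of each cycle, as workflow()/aworkflow() now do.
    if workflow_finished:
        print(f"cycle {iteration}: workflow finished early, breaking")
        break

    if all(s == "completed" for s in statuses.values()):
        workflow_finished = True  # the next cycle's check breaks the loop
        continue

    run_one(current)
    # Naive routing: pick any remaining not-completed task, else finish.
    current = next((n for n, s in statuses.items() if s != "completed"), None)
    if current is None:
        workflow_finished = True

Setting the flag and letting the next cycle's check break the loop matches the "The next iteration loop check will break the workflow" comment in the diff above.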
praisonaiagents/task/task.py CHANGED

@@ -38,7 +38,8 @@ class Task:
 loop_state: Optional[Dict[str, Union[str, int]]] = None,
 memory=None,
 quality_check=True,
- input_file: Optional[str] = None
+ input_file: Optional[str] = None,
+ rerun: bool = False # Renamed from can_rerun and logic inverted, default True for backward compatibility
 ):
 # Add check if memory config is provided
 if memory is not None or (config and config.get('memory_config')):

@@ -76,6 +77,7 @@ class Task:
 self.loop_state = loop_state if loop_state else {}
 self.memory = memory
 self.quality_check = quality_check
+ self.rerun = rerun # Assigning the rerun parameter

 # Set logger level based on config verbose level
 verbose = self.config.get("verbose", 0)

@@ -83,7 +85,7 @@ class Task:
 logger.setLevel(logging.INFO)
 else:
 logger.setLevel(logging.WARNING)
-
+
 # Also set third-party loggers to WARNING
 logging.getLogger('chromadb').setLevel(logging.WARNING)
 logging.getLogger('openai').setLevel(logging.WARNING)

@@ -148,7 +150,7 @@ class Task:
 logger.info(f"Task {self.id}: Initializing memory from config: {self.config['memory_config']}")
 self.memory = Memory(config=self.config['memory_config'])
 logger.info(f"Task {self.id}: Memory initialized successfully")
-
+
 # Verify database was created
 if os.path.exists(self.config['memory_config']['storage']['path']):
 logger.info(f"Task {self.id}: Memory database exists after initialization")

@@ -182,11 +184,11 @@ class Task:
 """Execute callback and store quality metrics if enabled"""
 logger.info(f"Task {self.id}: execute_callback called")
 logger.info(f"Quality check enabled: {self.quality_check}")
-
+
 # Initialize memory if not already initialized
 if not self.memory:
 self.memory = self.initialize_memory()
-
+
 logger.info(f"Memory object exists: {self.memory is not None}")
 if self.memory:
 logger.info(f"Memory config: {self.memory.cfg}")

@@ -202,24 +204,24 @@ class Task:
 except Exception as e:
 logger.error(f"Task {self.id}: Failed to store task output in memory: {e}")
 logger.exception(e)
-
+
 logger.info(f"Task output: {task_output.raw[:100]}...")
-
+
 if self.quality_check and self.memory:
 try:
 logger.info(f"Task {self.id}: Starting memory operations")
 logger.info(f"Task {self.id}: Calculating quality metrics for output: {task_output.raw[:100]}...")
-
+
 # Get quality metrics from LLM
 metrics = self.memory.calculate_quality_metrics(
 task_output.raw,
 self.expected_output
 )
 logger.info(f"Task {self.id}: Quality metrics calculated: {metrics}")
-
+
 quality_score = metrics.get("accuracy", 0.0)
 logger.info(f"Task {self.id}: Quality score: {quality_score}")
-
+
 # Store in both short and long-term memory with higher threshold
 logger.info(f"Task {self.id}: Finalizing task output in memory...")
 self.memory.finalize_task_output(

@@ -231,7 +233,7 @@ class Task:
 task_id=self.id
 )
 logger.info(f"Task {self.id}: Finalized task output in memory")
-
+
 # Store quality metrics separately
 logger.info(f"Task {self.id}: Storing quality metrics...")
 self.memory.store_quality(

@@ -240,7 +242,7 @@ class Task:
 task_id=self.id,
 metrics=metrics
 )
-
+
 # Store in both short and long-term memory with higher threshold
 self.memory.finalize_task_output(
 content=task_output.raw,

@@ -248,7 +250,7 @@ class Task:
 quality_score=quality_score,
 threshold=0.7 # Only high quality outputs in long-term memory
 )
-
+
 # Build context for next tasks
 if self.next_tasks:
 logger.info(f"Task {self.id}: Building context for next tasks...")

@@ -257,7 +259,7 @@ class Task:
 max_items=5
 )
 logger.info(f"Task {self.id}: Built context for next tasks: {len(context)} items")
-
+
 logger.info(f"Task {self.id}: Memory operations complete")
 except Exception as e:
 logger.error(f"Task {self.id}: Failed to process memory operations: {e}")

@@ -295,7 +297,7 @@ Expected Output: {self.expected_output}.
 context_results.append(
 f"Previous task {context_item.name if context_item.name else context_item.description} has no result yet."
 )
-
+
 # Join unique context results
 unique_contexts = list(dict.fromkeys(context_results)) # Remove duplicates
 task_prompt += f"""

@@ -307,7 +309,7 @@ Context:
 def execute_callback_sync(self, task_output: TaskOutput) -> None:
 """
 Synchronous wrapper to ensure that execute_callback is awaited,
- preventing 'Task was destroyed but pending!' warnings if called
+ preventing 'Task was destroyed but pending!' warnings if called
 from non-async code.
 """
 import asyncio
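The task.py side of the change is small: `Task.__init__` gains a `rerun` parameter and stores it as `self.rerun`, and Process reads it with `getattr(task_to_check, 'rerun', True)` when deciding whether a completed task may be reset to "not started". A hedged sketch of that reset predicate, using a hypothetical `StubTask` rather than the real Task class:

# Sketch of how the reset check in Process treats the new flag; simplified, not the library code.
class StubTask:
    """Hypothetical stand-in exposing only the attributes the reset check reads."""
    def __init__(self, name: str, task_type: str = "task", rerun: bool = True):
        self.name = name
        self.task_type = task_type
        self.rerun = rerun
        self.status = "completed"

def should_reset(task: StubTask, all_tasks: list) -> bool:
    # Mirrors the condition in workflow()/aworkflow(): reset only if rerun is truthy,
    # the task is not a loop task, and it is not a generated subtask of a loop task.
    return (getattr(task, 'rerun', True)
            and task.task_type != "loop"
            and not any(t.task_type == "loop" and task.name.startswith(t.name + "_")
                        for t in all_tasks))

tasks = [
    StubTask("loader", task_type="loop"),  # loop task: never reset
    StubTask("loader_1"),                  # subtask of the loop task: never reset
    StubTask("report", rerun=False),       # opts out of re-running
    StubTask("review"),                    # eligible for reset
]
print([t.name for t in tasks if should_reset(t, tasks)])  # ['review']

In other words, a task whose `rerun` attribute is falsy keeps its "completed" status, while loop tasks and their generated subtasks are never reset regardless of the flag.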
{praisonaiagents-0.0.58.dist-info → praisonaiagents-0.0.59.dist-info}/RECORD CHANGED

@@ -12,9 +12,9 @@ praisonaiagents/llm/__init__.py,sha256=ttPQQJQq6Tah-0updoEXDZFKWtJAM93rBWRoIgxRW
 praisonaiagents/llm/llm.py,sha256=G2wKMwitWBJRS6nOq9W77zXtsxvJwsVwXFOKYcllY0E,51386
 praisonaiagents/memory/memory.py,sha256=I8dOTkrl1i-GgQbDcrFOsSruzJ7MiI6Ys37DK27wrUs,35537
 praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
- praisonaiagents/process/process.py,sha256=
+ praisonaiagents/process/process.py,sha256=HPw84OhnKQW3EyrDkpoQu0DcpxThbrzR2hWUgwQh9Pw,59955
 praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
- praisonaiagents/task/task.py,sha256=
+ praisonaiagents/task/task.py,sha256=8KztSUKMO74y619TyI8d5DMJ1xPbyQXIB0Ux583oOVw,14259
 praisonaiagents/tools/__init__.py,sha256=CWOYV9SudYY82r45LnNgaVRV3cmsAFdasNRkPrLsgmI,9198
 praisonaiagents/tools/arxiv_tools.py,sha256=1stb31zTjLTon4jCnpZG5de9rKc9QWgC0leLegvPXWo,10528
 praisonaiagents/tools/calculator_tools.py,sha256=S1xPT74Geurvjm52QMMIG29zDXVEWJmM6nmyY7yF298,9571

@@ -36,7 +36,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=k1gZHtgY1poVp5kajhgs4S9a4-epdA8NyZfYTa34lQU,17651
- praisonaiagents-0.0.
- praisonaiagents-0.0.
- praisonaiagents-0.0.
- praisonaiagents-0.0.
+ praisonaiagents-0.0.59.dist-info/METADATA,sha256=w6DYqKW5P9b2Rqu02j5Lt4-6K7f-InDrtUfV7fYu_FM,830
+ praisonaiagents-0.0.59.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+ praisonaiagents-0.0.59.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+ praisonaiagents-0.0.59.dist-info/RECORD,,
{praisonaiagents-0.0.58.dist-info → praisonaiagents-0.0.59.dist-info}/WHEEL: File without changes
{praisonaiagents-0.0.58.dist-info → praisonaiagents-0.0.59.dist-info}/top_level.txt: File without changes