praisonaiagents 0.0.57__py3-none-any.whl → 0.0.58__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
praisonaiagents/process/process.py
@@ -13,6 +13,13 @@ class LoopItems(BaseModel):
 
  class Process:
  def __init__(self, tasks: Dict[str, Task], agents: List[Agent], manager_llm: Optional[str] = None, verbose: bool = False, max_iter: int = 10):
+ logging.debug(f"=== Initializing Process ===")
+ logging.debug(f"Number of tasks: {len(tasks)}")
+ logging.debug(f"Number of agents: {len(agents)}")
+ logging.debug(f"Manager LLM: {manager_llm}")
+ logging.debug(f"Verbose mode: {verbose}")
+ logging.debug(f"Max iterations: {max_iter}")
+
  self.tasks = tasks
  self.agents = agents
  self.manager_llm = manager_llm
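The initialization messages added above are emitted at DEBUG level, so they stay silent unless the host application opts in. A minimal sketch of enabling them with the standard-library logging module (the log format shown is an assumption, not something the package configures):

    import logging

    # Assumption: the package logs through the stdlib logging module, so a
    # root-level DEBUG configuration is enough to surface these messages.
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s %(name)s %(levelname)s %(message)s",
    )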
@@ -21,25 +28,30 @@ class Process:
 
  async def aworkflow(self) -> AsyncGenerator[str, None]:
  """Async version of workflow method"""
+ logging.debug("=== Starting Async Workflow ===")
  current_iter = 0 # Track how many times we've looped
  # Build workflow relationships first
+ logging.debug("Building workflow relationships...")
  for task in self.tasks.values():
  if task.next_tasks:
  for next_task_name in task.next_tasks:
  next_task = next((t for t in self.tasks.values() if t.name == next_task_name), None)
  if next_task:
  next_task.previous_tasks.append(task.name)
+ logging.debug(f"Added {task.name} as previous task for {next_task_name}")
 
  # Find start task
+ logging.debug("Finding start task...")
  start_task = None
  for task_id, task in self.tasks.items():
  if task.is_start:
  start_task = task
+ logging.debug(f"Found marked start task: {task.name} (id: {task_id})")
  break
 
  if not start_task:
  start_task = list(self.tasks.values())[0]
- logging.info("No start task marked, using first task")
+ logging.debug(f"No start task marked, using first task: {start_task.name}")
 
  current_task = start_task
  visited_tasks = set()
@@ -54,7 +66,16 @@ class Process:
  break
 
  task_id = current_task.id
- logging.info(f"Executing workflow task: {current_task.name if current_task.name else task_id}")
+ logging.debug(f"""
+ === Task Execution Details ===
+ Current task: {current_task.name}
+ Type: {current_task.task_type}
+ Status: {current_task.status}
+ Previous tasks: {current_task.previous_tasks}
+ Next tasks: {current_task.next_tasks}
+ Context tasks: {[t.name for t in current_task.context] if current_task.context else []}
+ Description length: {len(current_task.description)}
+ """)
 
  # Add context from previous tasks to description
  if current_task.previous_tasks or current_task.context:
@@ -66,46 +87,6 @@ class Process:
  if prev_task and prev_task.result:
  # Handle loop data
  if current_task.task_type == "loop":
- # # create a loop manager Agent
- # loop_manager = Agent(
- # name="Loop Manager",
- # role="Loop data processor",
- # goal="Process loop data and convert it to list format",
- # backstory="Expert at handling loop data and converting it to proper format",
- # llm=self.manager_llm,
- # verbose=self.verbose,
- # markdown=True
- # )
-
- # # get the loop data convert it to list using calling Agent class chat
- # loop_prompt = f"""
- # Process this data into a list format:
- # {prev_task.result.raw}
-
- # Return a JSON object with an 'items' array containing the items to process.
- # """
- # if current_task.async_execution:
- # loop_data_str = await loop_manager.achat(
- # prompt=loop_prompt,
- # output_json=LoopItems
- # )
- # else:
- # loop_data_str = loop_manager.chat(
- # prompt=loop_prompt,
- # output_json=LoopItems
- # )
-
- # try:
- # # The response will already be parsed into LoopItems model
- # loop_data[f"loop_{current_task.name}"] = {
- # "items": loop_data_str.items,
- # "index": 0,
- # "remaining": len(loop_data_str.items)
- # }
- # context += f"\nCurrent loop item: {loop_data_str.items[0]}"
- # except Exception as e:
- # display_error(f"Failed to process loop data: {e}")
- # context += f"\n{prev_name}: {prev_task.result.raw}"
  context += f"\n{prev_name}: {prev_task.result.raw}"
  else:
  context += f"\n{prev_name}: {prev_task.result.raw}"
@@ -119,14 +100,103 @@ class Process:
  # Update task description with context
  current_task.description = current_task.description + context
 
- # Execute task using existing run_task method
- yield task_id
- visited_tasks.add(task_id)
+ # Skip execution for loop tasks, only process their subtasks
+ if current_task.task_type == "loop":
+ logging.debug(f"""
+ === Loop Task Details ===
+ Name: {current_task.name}
+ ID: {current_task.id}
+ Status: {current_task.status}
+ Next tasks: {current_task.next_tasks}
+ Condition: {current_task.condition}
+ Subtasks created: {getattr(current_task, '_subtasks_created', False)}
+ Input file: {getattr(current_task, 'input_file', None)}
+ """)
+
+ # Check if subtasks are created and completed
+ if getattr(current_task, "_subtasks_created", False):
+ subtasks = [
+ t for t in self.tasks.values()
+ if t.name.startswith(current_task.name + "_")
+ ]
+ logging.debug(f"""
+ === Subtask Status Check ===
+ Total subtasks: {len(subtasks)}
+ Completed: {sum(1 for st in subtasks if st.status == "completed")}
+ Pending: {sum(1 for st in subtasks if st.status != "completed")}
+ """)
+
+ # Log detailed subtask info
+ for st in subtasks:
+ logging.debug(f"""
+ Subtask: {st.name}
+ - Status: {st.status}
+ - Next tasks: {st.next_tasks}
+ - Condition: {st.condition}
+ """)
+
+ if subtasks and all(st.status == "completed" for st in subtasks):
+ logging.debug(f"=== All {len(subtasks)} subtasks completed for {current_task.name} ===")
+
+ # Mark loop task completed and move to next task
+ current_task.status = "completed"
+ logging.debug(f"Loop {current_task.name} marked as completed")
+
+ # Move to next task if available
+ if current_task.next_tasks:
+ next_task_name = current_task.next_tasks[0]
+ logging.debug(f"Attempting transition to next task: {next_task_name}")
+ next_task = next((t for t in self.tasks.values() if t.name == next_task_name), None)
+ if next_task:
+ logging.debug(f"=== Transitioning: {current_task.name} -> {next_task.name} ===")
+ logging.debug(f"Next task status: {next_task.status}")
+ logging.debug(f"Next task condition: {next_task.condition}")
+ current_task = next_task
+ else:
+ logging.debug(f"=== No next tasks for {current_task.name}, ending loop ===")
+ current_task = None
+ else:
+ logging.debug(f"No subtasks created yet for {current_task.name}")
+ # Create subtasks if needed
+ if current_task.input_file:
+ self._create_loop_subtasks(current_task)
+ current_task._subtasks_created = True
+ logging.debug(f"Created subtasks from {current_task.input_file}")
+ else:
+ # No input file, mark as done
+ current_task.status = "completed"
+ logging.debug(f"No input file, marking {current_task.name} as completed")
+ if current_task.next_tasks:
+ next_task_name = current_task.next_tasks[0]
+ next_task = next((t for t in self.tasks.values() if t.name == next_task_name), None)
+ current_task = next_task
+ else:
+ current_task = None
+ else:
+ # Execute non-loop task
+ logging.debug(f"=== Executing non-loop task: {current_task.name} (id: {task_id}) ===")
+ logging.debug(f"Task status: {current_task.status}")
+ logging.debug(f"Task next_tasks: {current_task.next_tasks}")
+ yield task_id
+ visited_tasks.add(task_id)
 
  # Reset completed task to "not started" so it can run again
  if self.tasks[task_id].status == "completed":
- logging.debug(f"Task {task_id} was completed, resetting to 'not started' for next iteration.")
- self.tasks[task_id].status = "not started"
+ # Never reset loop tasks, decision tasks, or their subtasks
+ subtask_name = self.tasks[task_id].name
+ logging.debug(f"=== Checking reset for completed task: {subtask_name} ===")
+ logging.debug(f"Task type: {self.tasks[task_id].task_type}")
+ logging.debug(f"Task status before reset check: {self.tasks[task_id].status}")
+
+ if (self.tasks[task_id].task_type not in ["loop", "decision"] and
+ not any(t.task_type == "loop" and subtask_name.startswith(t.name + "_")
+ for t in self.tasks.values())):
+ logging.debug(f"=== Resetting non-loop, non-decision task {subtask_name} to 'not started' ===")
+ self.tasks[task_id].status = "not started"
+ logging.debug(f"Task status after reset: {self.tasks[task_id].status}")
+ else:
+ logging.debug(f"=== Skipping reset for loop/decision/subtask: {subtask_name} ===")
+ logging.debug(f"Keeping status as: {self.tasks[task_id].status}")
 
  # Handle loop progression
  if current_task.task_type == "loop":
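The reset guard above relies on a naming convention: subtasks generated from a loop task are named "<loop name>_<n>", so the prefix check identifies them without extra bookkeeping. A small illustration of that check with hypothetical names:

    loop_task_name = "qa_loop"                        # hypothetical loop task name
    task_names = ["qa_loop_1", "qa_loop_2", "report"]

    # Mirrors the startswith(name + "_") test used when deciding whether to skip the reset
    protected = [name.startswith(loop_task_name + "_") for name in task_names]
    print(protected)  # [True, True, False]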
@@ -179,6 +249,15 @@ class Process:
  logging.info("Workflow execution completed")
  break
 
+ # Add completion logging
+ logging.debug(f"""
+ === Task Completion ===
+ Task: {current_task.name}
+ Final status: {current_task.status}
+ Next task: {next_task.name if next_task else None}
+ Iteration: {current_iter}/{self.max_iter}
+ """)
+
  async def asequential(self) -> AsyncGenerator[str, None]:
  """Async version of sequential method"""
  for task_id in self.tasks:
@@ -343,33 +422,59 @@ Provide a JSON with the structure:
  new_tasks = []
 
  if file_ext == ".csv":
- # existing CSV reading logic
  with open(start_task.input_file, "r", encoding="utf-8") as f:
- # Try as simple CSV first
- reader = csv.reader(f)
+ reader = csv.reader(f, quotechar='"', escapechar='\\') # Handle quoted/escaped fields
  previous_task = None
+ task_count = 0
+
  for i, row in enumerate(reader):
- if row: # Skip empty rows
- task_desc = row[0] # Take first column
- row_task = Task(
- description=f"{start_task.description}\n{task_desc}" if start_task.description else task_desc,
- agent=start_task.agent,
- name=f"{start_task.name}_{i+1}" if start_task.name else task_desc,
- expected_output=getattr(start_task, 'expected_output', None),
- is_start=(i == 0),
- task_type="task",
- condition={
- "complete": ["next"],
- "retry": ["current"]
- }
- )
- self.tasks[row_task.id] = row_task
- new_tasks.append(row_task)
+ if not row: # Skip truly empty rows
+ continue
 
- if previous_task:
- previous_task.next_tasks = [row_task.name]
- previous_task.condition["complete"] = [row_task.name]
- previous_task = row_task
+ # Properly handle Q&A pairs with potential commas
+ task_desc = row[0].strip() if row else ""
+ if len(row) > 1:
+ # Preserve all fields in case of multiple commas
+ question = row[0].strip()
+ answer = ",".join(field.strip() for field in row[1:])
+ task_desc = f"Question: {question}\nAnswer: {answer}"
+
+ if not task_desc: # Skip rows with empty content
+ continue
+
+ task_count += 1
+ logging.debug(f"Processing CSV row {i+1}: {task_desc}")
+
+ # Inherit next_tasks from parent loop task
+ inherited_next_tasks = start_task.next_tasks if start_task.next_tasks else []
+
+ row_task = Task(
+ description=f"{start_task.description}\n{task_desc}" if start_task.description else task_desc,
+ agent=start_task.agent,
+ name=f"{start_task.name}_{task_count}" if start_task.name else task_desc,
+ expected_output=getattr(start_task, 'expected_output', None),
+ is_start=(task_count == 1),
+ task_type="decision", # Change to decision type
+ next_tasks=inherited_next_tasks, # Inherit parent's next tasks
+ condition={
+ "done": inherited_next_tasks if inherited_next_tasks else ["next"], # Use full inherited_next_tasks
+ "retry": ["current"],
+ "exit": [] # Empty list for exit condition
+ }
+ )
+ self.tasks[row_task.id] = row_task
+ new_tasks.append(row_task)
+
+ if previous_task:
+ previous_task.next_tasks = [row_task.name]
+ previous_task.condition["done"] = [row_task.name] # Use "done" consistently
+ previous_task = row_task
+
+ # For the last task in the loop, ensure it points to parent's next tasks
+ if task_count > 0 and not row_task.next_tasks:
+ row_task.next_tasks = inherited_next_tasks
+
+ logging.info(f"Processed {task_count} rows from CSV file")
  else:
  # If not CSV, read lines
  with open(start_task.input_file, "r", encoding="utf-8") as f:
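In the rewritten CSV branch above, each row becomes a question/answer pair: the first column is the question and any remaining columns are rejoined with commas to form the answer, so answers containing commas survive intact. A hypothetical input file this code would accept (name and contents are illustrative only):

    "What is the capital of France?","Paris"
    "Name three primary colours","red, yellow, blue"

Each non-empty row is turned into a decision-type subtask named <loop task name>_1, <loop task name>_2, and so on, inheriting the parent loop task's next_tasks.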
@@ -402,7 +507,7 @@ Provide a JSON with the structure:
  except Exception as e:
  logging.error(f"Failed to read file tasks: {e}")
 
- # end of the new block
+ # end of start task handling
  current_task = start_task
  visited_tasks = set()
  loop_data = {} # Store loop-specific data
@@ -413,8 +518,88 @@ Provide a JSON with the structure:
  logging.info(f"Max iteration limit {self.max_iter} reached, ending workflow.")
  break
 
+ # Handle loop task file reading at runtime
+ if (current_task.task_type == "loop" and
+ current_task is not start_task and
+ getattr(current_task, "_subtasks_created", False) is not True):
+
+ if not current_task.input_file:
+ current_task.input_file = "tasks.csv"
+
+ if getattr(current_task, "input_file", None):
+ try:
+ file_ext = os.path.splitext(current_task.input_file)[1].lower()
+ new_tasks = []
+
+ if file_ext == ".csv":
+ with open(current_task.input_file, "r", encoding="utf-8") as f:
+ reader = csv.reader(f)
+ previous_task = None
+ for i, row in enumerate(reader):
+ if row: # Skip empty rows
+ task_desc = row[0] # Take first column
+ row_task = Task(
+ description=f"{current_task.description}\n{task_desc}" if current_task.description else task_desc,
+ agent=current_task.agent,
+ name=f"{current_task.name}_{i+1}" if current_task.name else task_desc,
+ expected_output=getattr(current_task, 'expected_output', None),
+ is_start=(i == 0),
+ task_type="task",
+ condition={
+ "complete": ["next"],
+ "retry": ["current"]
+ }
+ )
+ self.tasks[row_task.id] = row_task
+ new_tasks.append(row_task)
+
+ if previous_task:
+ previous_task.next_tasks = [row_task.name]
+ previous_task.condition["complete"] = [row_task.name]
+ previous_task = row_task
+ else:
+ with open(current_task.input_file, "r", encoding="utf-8") as f:
+ lines = f.read().splitlines()
+ previous_task = None
+ for i, line in enumerate(lines):
+ row_task = Task(
+ description=f"{current_task.description}\n{line.strip()}" if current_task.description else line.strip(),
+ agent=current_task.agent,
+ name=f"{current_task.name}_{i+1}" if current_task.name else line.strip(),
+ expected_output=getattr(current_task, 'expected_output', None),
+ is_start=(i == 0),
+ task_type="task",
+ condition={
+ "complete": ["next"],
+ "retry": ["current"]
+ }
+ )
+ self.tasks[row_task.id] = row_task
+ new_tasks.append(row_task)
+
+ if previous_task:
+ previous_task.next_tasks = [row_task.name]
+ previous_task.condition["complete"] = [row_task.name]
+ previous_task = row_task
+
+ if new_tasks:
+ current_task.next_tasks = [new_tasks[0].name]
+ current_task._subtasks_created = True
+ logging.info(f"Created {len(new_tasks)} tasks from: {current_task.input_file} for loop task {current_task.name}")
+ except Exception as e:
+ logging.error(f"Failed to read file tasks for loop task {current_task.name}: {e}")
+
  task_id = current_task.id
- logging.info(f"Executing workflow task: {current_task.name if current_task.name else task_id}")
+ logging.debug(f"""
+ === Task Execution Details ===
+ Current task: {current_task.name}
+ Type: {current_task.task_type}
+ Status: {current_task.status}
+ Previous tasks: {current_task.previous_tasks}
+ Next tasks: {current_task.next_tasks}
+ Context tasks: {[t.name for t in current_task.context] if current_task.context else []}
+ Description length: {len(current_task.description)}
+ """)
 
  # Add context from previous tasks to description
  if current_task.previous_tasks or current_task.context:
@@ -426,40 +611,6 @@ Provide a JSON with the structure:
  if prev_task and prev_task.result:
  # Handle loop data
  if current_task.task_type == "loop":
- # # create a loop manager Agent
- # loop_manager = Agent(
- # name="Loop Manager",
- # role="Loop data processor",
- # goal="Process loop data and convert it to list format",
- # backstory="Expert at handling loop data and converting it to proper format",
- # llm=self.manager_llm,
- # verbose=self.verbose,
- # markdown=True
- # )
-
- # # get the loop data convert it to list using calling Agent class chat
- # loop_prompt = f"""
- # Process this data into a list format:
- # {prev_task.result.raw}
-
- # Return a JSON object with an 'items' array containing the items to process.
- # """
- # loop_data_str = loop_manager.chat(
- # prompt=loop_prompt,
- # output_json=LoopItems
- # )
-
- # try:
- # # The response will already be parsed into LoopItems model
- # loop_data[f"loop_{current_task.name}"] = {
- # "items": loop_data_str.items,
- # "index": 0,
- # "remaining": len(loop_data_str.items)
- # }
- # context += f"\nCurrent loop item: {loop_data_str.items[0]}"
- # except Exception as e:
- # display_error(f"Failed to process loop data: {e}")
- # context += f"\n{prev_name}: {prev_task.result.raw}"
  context += f"\n{prev_name}: {prev_task.result.raw}"
  else:
  context += f"\n{prev_name}: {prev_task.result.raw}"
@@ -473,14 +624,103 @@ Provide a JSON with the structure:
  # Update task description with context
  current_task.description = current_task.description + context
 
- # Execute task using existing run_task method
- yield task_id
- visited_tasks.add(task_id)
+ # Skip execution for loop tasks, only process their subtasks
+ if current_task.task_type == "loop":
+ logging.debug(f"""
+ === Loop Task Details ===
+ Name: {current_task.name}
+ ID: {current_task.id}
+ Status: {current_task.status}
+ Next tasks: {current_task.next_tasks}
+ Condition: {current_task.condition}
+ Subtasks created: {getattr(current_task, '_subtasks_created', False)}
+ Input file: {getattr(current_task, 'input_file', None)}
+ """)
+
+ # Check if subtasks are created and completed
+ if getattr(current_task, "_subtasks_created", False):
+ subtasks = [
+ t for t in self.tasks.values()
+ if t.name.startswith(current_task.name + "_")
+ ]
+
+ logging.debug(f"""
+ === Subtask Status Check ===
+ Total subtasks: {len(subtasks)}
+ Completed: {sum(1 for st in subtasks if st.status == "completed")}
+ Pending: {sum(1 for st in subtasks if st.status != "completed")}
+ """)
+
+ for st in subtasks:
+ logging.debug(f"""
+ Subtask: {st.name}
+ - Status: {st.status}
+ - Next tasks: {st.next_tasks}
+ - Condition: {st.condition}
+ """)
+
+ if subtasks and all(st.status == "completed" for st in subtasks):
+ logging.debug(f"=== All {len(subtasks)} subtasks completed for {current_task.name} ===")
+
+ # Mark loop task completed and move to next task
+ current_task.status = "completed"
+ logging.debug(f"Loop {current_task.name} marked as completed")
+
+ # Move to next task if available
+ if current_task.next_tasks:
+ next_task_name = current_task.next_tasks[0]
+ logging.debug(f"Attempting transition to next task: {next_task_name}")
+ next_task = next((t for t in self.tasks.values() if t.name == next_task_name), None)
+ if next_task:
+ logging.debug(f"=== Transitioning: {current_task.name} -> {next_task.name} ===")
+ logging.debug(f"Next task status: {next_task.status}")
+ logging.debug(f"Next task condition: {next_task.condition}")
+ current_task = next_task
+ else:
+ logging.debug(f"=== No next tasks for {current_task.name}, ending loop ===")
+ current_task = None
+ else:
+ logging.debug(f"No subtasks created yet for {current_task.name}")
+ # Create subtasks if needed
+ if current_task.input_file:
+ self._create_loop_subtasks(current_task)
+ current_task._subtasks_created = True
+ logging.debug(f"Created subtasks from {current_task.input_file}")
+ else:
+ # No input file, mark as done
+ current_task.status = "completed"
+ logging.debug(f"No input file, marking {current_task.name} as completed")
+ if current_task.next_tasks:
+ next_task_name = current_task.next_tasks[0]
+ next_task = next((t for t in self.tasks.values() if t.name == next_task_name), None)
+ current_task = next_task
+ else:
+ current_task = None
+ else:
+ # Execute non-loop task
+ logging.debug(f"=== Executing non-loop task: {current_task.name} (id: {task_id}) ===")
+ logging.debug(f"Task status: {current_task.status}")
+ logging.debug(f"Task next_tasks: {current_task.next_tasks}")
+ yield task_id
+ visited_tasks.add(task_id)
 
- # Reset completed task to "not started" so it can run again: Only for workflow because some tasks may be revisited
+ # Reset completed task to "not started" so it can run again
  if self.tasks[task_id].status == "completed":
- logging.debug(f"Task {task_id} was completed, resetting to 'not started' for next iteration.")
- self.tasks[task_id].status = "not started"
+ # Never reset loop tasks, decision tasks, or their subtasks
+ subtask_name = self.tasks[task_id].name
+ logging.debug(f"=== Checking reset for completed task: {subtask_name} ===")
+ logging.debug(f"Task type: {self.tasks[task_id].task_type}")
+ logging.debug(f"Task status before reset check: {self.tasks[task_id].status}")
+
+ if (self.tasks[task_id].task_type not in ["loop", "decision"] and
+ not any(t.task_type == "loop" and subtask_name.startswith(t.name + "_")
+ for t in self.tasks.values())):
+ logging.debug(f"=== Resetting non-loop, non-decision task {subtask_name} to 'not started' ===")
+ self.tasks[task_id].status = "not started"
+ logging.debug(f"Task status after reset: {self.tasks[task_id].status}")
+ else:
+ logging.debug(f"=== Skipping reset for loop/decision/subtask: {subtask_name} ===")
+ logging.debug(f"Keeping status as: {self.tasks[task_id].status}")
 
  # Handle loop progression
  if current_task.task_type == "loop":
praisonaiagents/tools/csv_tools.py
@@ -89,7 +89,7 @@ class CSVTools:
  def write_csv(
  self,
  filepath: str,
- data: List[Dict[str, Any]],
+ data: Union[List[Dict[str, Any]], str],
  encoding: str = 'utf-8',
  delimiter: str = ',',
  index: bool = False,
@@ -102,35 +102,66 @@ class CSVTools:
 
  Args:
  filepath: Path to CSV file
- data: List of row dicts to write
- encoding: File encoding
- delimiter: Column delimiter
- index: Whether to write row indices
- header: Whether to write column headers
- float_format: Format string for float values
- date_format: Format string for date values
- mode: Write mode ('w' for write, 'a' for append)
-
+ data: Either a list of dictionaries or a string containing CSV data
+ If string, each line should be comma-separated values
+ encoding: File encoding (default: 'utf-8')
+ delimiter: Column delimiter (default: ',')
+ index: Whether to write row indices (default: False)
+ header: Whether to write column headers (default: True)
+ float_format: Format string for float values (default: None)
+ date_format: Format string for date values (default: None)
+ mode: Write mode - 'w' for write, 'a' for append (default: 'w')
+
  Returns:
- bool: Success status
+ bool: True if successful, False otherwise
  """
  try:
  pd = self._get_pandas()
  if pd is None:
  return False
 
- df = pd.DataFrame(data)
- df.to_csv(
- filepath,
- encoding=encoding,
- sep=delimiter,
- index=index,
- header=header,
- float_format=float_format,
- date_format=date_format,
- mode=mode
- )
- return True
+ # Handle string input
+ if isinstance(data, str):
+ # Convert string to list of dicts
+ rows = []
+ if delimiter in data:
+ # Get existing columns if file exists and in append mode
+ existing_cols = []
+ if mode == 'a' and Path(filepath).exists():
+ try:
+ existing_df = pd.read_csv(filepath, nrows=1)
+ existing_cols = existing_df.columns.tolist()
+ except:
+ pass
+
+ values = [v.strip() for v in data.split(delimiter)]
+
+ if existing_cols:
+ # Use existing column names
+ row_dict = dict(zip(existing_cols, values))
+ else:
+ # Create generic column names
+ row_dict = {f'col{i}': val for i, val in enumerate(values)}
+
+ rows.append(row_dict)
+ data = rows
+
+ df = pd.DataFrame(data)
+
+ # Handle append mode properly
+ write_header = header if mode == 'w' else (header and not Path(filepath).exists())
+
+ df.to_csv(
+ filepath,
+ encoding=encoding,
+ sep=delimiter,
+ index=index,
+ header=write_header,
+ float_format=float_format,
+ date_format=date_format,
+ mode=mode
+ )
+ return True
 
  except Exception as e:
  error_msg = f"Error writing CSV file {filepath}: {str(e)}"
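With the widened signature, write_csv accepts either a list of row dictionaries or a single delimited string. A hedged usage sketch (the import path is inferred from the wheel's RECORD, the constructor is assumed to take no required arguments, and pandas must be installed since the method returns False without it):

    from praisonaiagents.tools.csv_tools import CSVTools  # assumed import path

    tools = CSVTools()

    # List-of-dicts input, as before
    tools.write_csv("scores.csv", [{"name": "alice", "score": 0.9}])

    # New: a single comma-separated string; in append mode the existing header
    # supplies column names, otherwise generic col0, col1, ... names are used
    tools.write_csv("scores.csv", "bob, 0.7", mode="a")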
praisonaiagents/tools/train/data/generatecot.py
@@ -21,7 +21,7 @@ class GenerateCOT:
  qa_pairs: Optional[Dict[str, str]] = None,
  model: str = "gpt-4o-mini",
  api_key: Optional[str] = None,
- max_attempts: int = 100
+ max_attempts: int = 3
  ):
  self.qa_pairs = qa_pairs or {}
  self.max_attempts = max_attempts
@@ -79,8 +79,10 @@ class GenerateCOT:
  def cot_improve(self, question: str, current: str) -> str:
  best_solution = current
  best_score = self._rate_solution(question, current)
+ attempts = 0
 
- for _ in range(self.max_attempts):
+ while attempts < self.max_attempts:
+ attempts += 1
  new_solution = self.cot_generate(question, current)
  new_score = self._rate_solution(question, new_solution)
 
@@ -88,7 +90,7 @@ class GenerateCOT:
  best_solution = new_solution
  best_score = new_score
 
- if best_score > 0.9:
+ if best_score > 0.8:
  break
 
  return best_solution
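The improvement loop now makes at most three generate-and-rate attempts by default and stops early once a solution scores above 0.8. A hedged sketch of calling it (the import path is inferred from the wheel's RECORD; an OpenAI-compatible API key is assumed to be configured in the environment):

    from praisonaiagents.tools.train.data.generatecot import GenerateCOT  # assumed import path

    cot = GenerateCOT(model="gpt-4o-mini")   # max_attempts now defaults to 3
    draft = "2 + 2 = 5"                      # deliberately weak starting solution
    improved = cot.cot_improve("What is 2 + 2?", draft)
    print(improved)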
@@ -228,14 +230,16 @@ class GenerateCOT:
  "final_answer": current_solution
  }
  best_score = self._rate_solution(question, current_solution)
+ attempts = 0
 
- for _ in range(self.max_attempts):
+ while attempts < self.max_attempts:
+ attempts += 1
  new_solution = self.cot_generate_dict(question, current_solution)
  new_score = self._rate_solution(question, new_solution["thought_process"])
  if new_score > best_score:
  best_solution = new_solution
  best_score = new_score
- if best_score > 0.9:
+ if best_score > 0.8:
  break
  return best_solution
 
@@ -333,7 +337,10 @@ class GenerateCOT:
  Creates file with headers if it doesn't exist.
  """
  try:
- # Remove timestamp-based filename generation since we have default
+ # Add the current QA pair to self.qa_pairs
+ self.qa_pairs[question] = answer
+
+ # Generate solution
  solution = self.cot_run_dict(question)
 
  import csv
praisonaiagents-0.0.57.dist-info/METADATA → praisonaiagents-0.0.58.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: praisonaiagents
- Version: 0.0.57
+ Version: 0.0.58
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Dist: pydantic
praisonaiagents-0.0.57.dist-info/RECORD → praisonaiagents-0.0.58.dist-info/RECORD
@@ -12,13 +12,13 @@ praisonaiagents/llm/__init__.py,sha256=ttPQQJQq6Tah-0updoEXDZFKWtJAM93rBWRoIgxRW
  praisonaiagents/llm/llm.py,sha256=G2wKMwitWBJRS6nOq9W77zXtsxvJwsVwXFOKYcllY0E,51386
  praisonaiagents/memory/memory.py,sha256=I8dOTkrl1i-GgQbDcrFOsSruzJ7MiI6Ys37DK27wrUs,35537
  praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
- praisonaiagents/process/process.py,sha256=gP3QQxxFO4oUw_HYLf8MoyWyaj_104LIL_AbwLiBxaU,31261
+ praisonaiagents/process/process.py,sha256=BrS8_4Gt2ewXt559hThJTSrXVYG8daabO9tGDaWmrm0,44906
  praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
  praisonaiagents/task/task.py,sha256=ikFjzNm4WPYONSLtWA3uDGNIUx_TvXTeU5SukWoC66E,14271
  praisonaiagents/tools/__init__.py,sha256=CWOYV9SudYY82r45LnNgaVRV3cmsAFdasNRkPrLsgmI,9198
  praisonaiagents/tools/arxiv_tools.py,sha256=1stb31zTjLTon4jCnpZG5de9rKc9QWgC0leLegvPXWo,10528
  praisonaiagents/tools/calculator_tools.py,sha256=S1xPT74Geurvjm52QMMIG29zDXVEWJmM6nmyY7yF298,9571
- praisonaiagents/tools/csv_tools.py,sha256=gX2nYz4ktmpKvXB36jx5-GqddntEQD4G2fVQWTIKrwU,8435
+ praisonaiagents/tools/csv_tools.py,sha256=4Yr0QYwBXt-1BDXGLalB2eSsFR2mB5rH3KdHmRBQY6E,10036
  praisonaiagents/tools/duckdb_tools.py,sha256=KB3b-1HcX7ocoxskDpk_7RRpTGHnH8hizIY0ZdLRbIE,8816
  praisonaiagents/tools/duckduckgo_tools.py,sha256=ynlB5ZyWfHYjUq0JZXH12TganqTihgD-2IyRgs32y84,1657
  praisonaiagents/tools/excel_tools.py,sha256=e2HqcwnyBueOyss0xEKxff3zB4w4sNWCOMXvZfbDYlE,11309
@@ -35,8 +35,8 @@ praisonaiagents/tools/wikipedia_tools.py,sha256=pGko-f33wqXgxJTv8db7TbizY5XnzBQR
  praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxNMMs1A,17122
  praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
  praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
- praisonaiagents/tools/train/data/generatecot.py,sha256=HA8HwbhGIavfALxMbKTdGwABP5S6qzuiPtmUiV-FTZI,17491
- praisonaiagents-0.0.57.dist-info/METADATA,sha256=ad3iyUlLBQqjpuTcbka6Z6MAX57RaJGRbkifyYEhz-w,830
- praisonaiagents-0.0.57.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
- praisonaiagents-0.0.57.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
- praisonaiagents-0.0.57.dist-info/RECORD,,
+ praisonaiagents/tools/train/data/generatecot.py,sha256=k1gZHtgY1poVp5kajhgs4S9a4-epdA8NyZfYTa34lQU,17651
+ praisonaiagents-0.0.58.dist-info/METADATA,sha256=N_DIe_TPq4gXuySMSwVSPpsSarqo7h-_fTAIQaRFGT0,830
+ praisonaiagents-0.0.58.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+ praisonaiagents-0.0.58.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+ praisonaiagents-0.0.58.dist-info/RECORD,,