praisonaiagents 0.0.127.tar.gz → 0.0.128.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/PKG-INFO +1 -1
  2. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/agents/agents.py +15 -17
  3. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/agents/autoagents.py +1 -1
  4. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/llm/llm.py +3 -220
  5. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/process/process.py +10 -6
  6. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents.egg-info/PKG-INFO +1 -1
  7. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/pyproject.toml +1 -1
  8. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/README.md +0 -0
  9. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/__init__.py +0 -0
  10. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/agent/__init__.py +0 -0
  11. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/agent/agent.py +0 -0
  12. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/agent/handoff.py +0 -0
  13. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/agent/image_agent.py +0 -0
  14. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/agents/__init__.py +0 -0
  15. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/approval.py +0 -0
  16. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/guardrails/__init__.py +0 -0
  17. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/guardrails/guardrail_result.py +0 -0
  18. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/guardrails/llm_guardrail.py +0 -0
  19. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/knowledge/__init__.py +0 -0
  20. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/knowledge/chunking.py +0 -0
  21. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/knowledge/knowledge.py +0 -0
  22. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/llm/__init__.py +0 -0
  23. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/llm/model_capabilities.py +0 -0
  24. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/llm/openai_client.py +0 -0
  25. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/main.py +0 -0
  26. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/mcp/__init__.py +0 -0
  27. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/mcp/mcp.py +0 -0
  28. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/mcp/mcp_http_stream.py +0 -0
  29. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/mcp/mcp_sse.py +0 -0
  30. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/memory/__init__.py +0 -0
  31. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/memory/memory.py +0 -0
  32. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/process/__init__.py +0 -0
  33. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/session.py +0 -0
  34. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/task/__init__.py +0 -0
  35. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/task/task.py +0 -0
  36. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/telemetry/__init__.py +0 -0
  37. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/telemetry/integration.py +0 -0
  38. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/telemetry/telemetry.py +0 -0
  39. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/tools/README.md +0 -0
  40. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/tools/__init__.py +0 -0
  41. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/tools/arxiv_tools.py +0 -0
  42. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/tools/calculator_tools.py +0 -0
  43. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/tools/csv_tools.py +0 -0
  44. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/tools/duckdb_tools.py +0 -0
  45. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
  46. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/tools/excel_tools.py +0 -0
  47. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/tools/file_tools.py +0 -0
  48. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/tools/json_tools.py +0 -0
  49. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/tools/newspaper_tools.py +0 -0
  50. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/tools/pandas_tools.py +0 -0
  51. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/tools/python_tools.py +0 -0
  52. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/tools/searxng_tools.py +0 -0
  53. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/tools/shell_tools.py +0 -0
  54. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/tools/spider_tools.py +0 -0
  55. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/tools/test.py +0 -0
  56. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/tools/tools.py +0 -0
  57. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/tools/train/data/generatecot.py +0 -0
  58. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/tools/wikipedia_tools.py +0 -0
  59. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/tools/xml_tools.py +0 -0
  60. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/tools/yaml_tools.py +0 -0
  61. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/tools/yfinance_tools.py +0 -0
  62. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents.egg-info/SOURCES.txt +0 -0
  63. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents.egg-info/dependency_links.txt +0 -0
  64. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents.egg-info/requires.txt +0 -0
  65. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents.egg-info/top_level.txt +0 -0
  66. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/setup.cfg +0 -0
  67. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/tests/test-graph-memory.py +0 -0
  68. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/tests/test.py +0 -0
  69. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/tests/test_fix_comprehensive.py +0 -0
  70. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/tests/test_handoff_compatibility.py +0 -0
  71. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/tests/test_http_stream_basic.py +0 -0
  72. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/tests/test_ollama_async_fix.py +0 -0
  73. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/tests/test_ollama_fix.py +0 -0
  74. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/tests/test_posthog_fixed.py +0 -0
  75. {praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/tests/test_validation_feedback.py +0 -0

{praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: praisonaiagents
- Version: 0.0.127
+ Version: 0.0.128
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Python: >=3.10

{praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/agents/agents.py
@@ -480,24 +480,22 @@ Context:
  )

  if self.process == "workflow":
- # Collect all tasks that should run in parallel
- parallel_tasks = []
+ tasks_to_run = []
  async for task_id in process.aworkflow():
- if self.tasks[task_id].async_execution and self.tasks[task_id].is_start:
- parallel_tasks.append(task_id)
- elif parallel_tasks:
- # Execute collected parallel tasks
- await asyncio.gather(*[self.arun_task(t) for t in parallel_tasks])
- parallel_tasks = []
- # Run the current non-parallel task
- if self.tasks[task_id].async_execution:
- await self.arun_task(task_id)
- else:
- self.run_task(task_id)
-
- # Execute any remaining parallel tasks
- if parallel_tasks:
- await asyncio.gather(*[self.arun_task(t) for t in parallel_tasks])
+ if self.tasks[task_id].async_execution:
+ tasks_to_run.append(self.arun_task(task_id))
+ else:
+ # If we encounter a sync task, we must wait for the previous async tasks to finish.
+ if tasks_to_run:
+ await asyncio.gather(*tasks_to_run)
+ tasks_to_run = []
+
+ # Run sync task in an executor to avoid blocking the event loop
+ loop = asyncio.get_event_loop()
+ await loop.run_in_executor(None, self.run_task, task_id)
+
+ if tasks_to_run:
+ await asyncio.gather(*tasks_to_run)

  elif self.process == "sequential":
  async for task_id in process.asequential():
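
Note on the agents.py hunk above: the new workflow loop queues the coroutine of each async task, flushes the queue with asyncio.gather() when it reaches a synchronous task, and runs that synchronous task in a thread executor so the event loop stays responsive. Below is a minimal, self-contained sketch of the same scheduling pattern; the Task dataclass and work functions are illustrative stand-ins, not the package's real API.

```python
import asyncio
import time
from dataclasses import dataclass


@dataclass
class Task:
    name: str
    async_execution: bool  # mirrors Task.async_execution used in the diff above


async def do_async(task: Task) -> str:
    await asyncio.sleep(0.1)  # stand-in for real async work
    return f"{task.name} (async) done"


def do_sync(task: Task) -> str:
    time.sleep(0.1)  # stand-in for blocking work
    return f"{task.name} (sync) done"


async def run_workflow(tasks: list[Task]) -> None:
    pending = []  # queued coroutines, analogous to tasks_to_run
    loop = asyncio.get_event_loop()
    for task in tasks:
        if task.async_execution:
            # Queue the coroutine; it only runs when the batch is gathered.
            pending.append(do_async(task))
        else:
            # A sync task forces the queued async batch to finish first.
            if pending:
                await asyncio.gather(*pending)
                pending = []
            # Run the blocking task in an executor so the loop is not blocked.
            await loop.run_in_executor(None, do_sync, task)
    if pending:  # flush any trailing async tasks
        await asyncio.gather(*pending)


asyncio.run(run_workflow([Task("a", True), Task("b", True), Task("c", False)]))
```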

{praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/agents/autoagents.py
@@ -136,7 +136,7 @@ class AutoAgents(PraisonAIAgents):
  completion_checker=completion_checker,
  max_retries=max_retries,
  process=process,
- manager_llm=manager_llm
+ manager_llm=manager_llm or self.llm
  )

  def _display_agents_and_tasks(self, agents: List[Agent], tasks: List[Task]):
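
The one-line autoagents.py change means the parent class now receives a usable manager model even when the caller omits manager_llm, instead of being handed None. A minimal illustration of the fallback; this is an illustrative class, not the real AutoAgents constructor.

```python
class AutoAgentsSketch:
    """Illustrative only; shows the manager_llm fallback, not the real signature."""

    def __init__(self, llm: str = "gpt-4o-mini", manager_llm: str | None = None):
        self.llm = llm
        # 0.0.128 behaviour: fall back to the agents' own llm when no manager_llm is given.
        self.manager_llm = manager_llm or self.llm


print(AutoAgentsSketch().manager_llm)                       # gpt-4o-mini
print(AutoAgentsSketch(manager_llm="gpt-4o").manager_llm)   # gpt-4o
```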

{praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/llm/llm.py
@@ -858,102 +858,6 @@ class LLM:
  iteration_count += 1
  continue

- # Special handling for Ollama models that don't automatically process tool results
- ollama_handled = False
- ollama_params = self._handle_ollama_model(response_text, tool_results, messages, original_prompt)
-
- if ollama_params:
- # Get response based on streaming mode
- if stream:
- # Streaming approach
- if verbose:
- with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
- response_text = ""
- for chunk in litellm.completion(
- **self._build_completion_params(
- messages=ollama_params["follow_up_messages"],
- temperature=temperature,
- stream=True
- )
- ):
- if chunk and chunk.choices and chunk.choices[0].delta.content:
- content = chunk.choices[0].delta.content
- response_text += content
- live.update(display_generating(response_text, start_time))
- else:
- response_text = ""
- for chunk in litellm.completion(
- **self._build_completion_params(
- messages=ollama_params["follow_up_messages"],
- temperature=temperature,
- stream=True
- )
- ):
- if chunk and chunk.choices and chunk.choices[0].delta.content:
- response_text += chunk.choices[0].delta.content
- else:
- # Non-streaming approach
- resp = litellm.completion(
- **self._build_completion_params(
- messages=ollama_params["follow_up_messages"],
- temperature=temperature,
- stream=False
- )
- )
- response_text = resp.get("choices", [{}])[0].get("message", {}).get("content", "") or ""
-
- # Set flag to indicate Ollama was handled
- ollama_handled = True
- final_response_text = response_text.strip() if response_text else ""
- logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
-
- # Display the response if we got one
- if final_response_text and verbose:
- display_interaction(
- ollama_params["original_prompt"],
- final_response_text,
- markdown=markdown,
- generation_time=time.time() - start_time,
- console=console
- )
-
- # Update messages and continue the loop instead of returning
- if final_response_text:
- # Update messages with the response to maintain conversation context
- messages.append({
- "role": "assistant",
- "content": final_response_text
- })
- # Continue the loop to check if more tools are needed
- iteration_count += 1
- continue
- else:
- logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")
-
- # Handle reasoning_steps after tool execution if not already handled by Ollama
- if reasoning_steps and not ollama_handled:
- # Make a non-streaming call to capture reasoning content
- reasoning_resp = litellm.completion(
- **self._build_completion_params(
- messages=messages,
- temperature=temperature,
- stream=False, # force non-streaming
- **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
- )
- )
- reasoning_content = reasoning_resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
- response_text = reasoning_resp["choices"][0]["message"]["content"]
-
- # Store reasoning content for later use
- if reasoning_content:
- stored_reasoning_content = reasoning_content
-
- # Update messages with the response
- messages.append({
- "role": "assistant",
- "content": response_text
- })
-
  # After tool execution, continue the loop to check if more tools are needed
  # instead of immediately trying to get a final response
  iteration_count += 1
@@ -1430,68 +1334,8 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  # Get response after tool calls
  response_text = ""

- # Special handling for Ollama models that don't automatically process tool results
- ollama_handled = False
- ollama_params = self._handle_ollama_model(response_text, tool_results, messages, original_prompt)
-
- if ollama_params:
- # Get response with streaming
- if verbose:
- response_text = ""
- async for chunk in await litellm.acompletion(
- **self._build_completion_params(
- messages=ollama_params["follow_up_messages"],
- temperature=temperature,
- stream=stream
- )
- ):
- if chunk and chunk.choices and chunk.choices[0].delta.content:
- content = chunk.choices[0].delta.content
- response_text += content
- print("\033[K", end="\r")
- print(f"Processing results... {time.time() - start_time:.1f}s", end="\r")
- else:
- response_text = ""
- async for chunk in await litellm.acompletion(
- **self._build_completion_params(
- messages=ollama_params["follow_up_messages"],
- temperature=temperature,
- stream=stream
- )
- ):
- if chunk and chunk.choices and chunk.choices[0].delta.content:
- response_text += chunk.choices[0].delta.content
-
- # Set flag to indicate Ollama was handled
- ollama_handled = True
- final_response_text = response_text.strip()
- logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
-
- # Display the response if we got one
- if final_response_text and verbose:
- display_interaction(
- ollama_params["original_prompt"],
- final_response_text,
- markdown=markdown,
- generation_time=time.time() - start_time,
- console=console
- )
-
- # Store the response for potential final return
- if final_response_text:
- # Update messages with the response to maintain conversation context
- messages.append({
- "role": "assistant",
- "content": final_response_text
- })
- # Continue the loop to check if more tools are needed
- iteration_count += 1
- continue
- else:
- logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")
-
- # If no special handling was needed or if it's not an Ollama model
- if reasoning_steps and not ollama_handled:
+ # If no special handling was needed
+ if reasoning_steps:
  # Non-streaming call to capture reasoning
  resp = await litellm.acompletion(
  **self._build_completion_params(
@@ -1521,7 +1365,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  generation_time=time.time() - start_time,
  console=console
  )
- elif not ollama_handled:
+ else:
  # Get response after tool calls with streaming if not already handled
  if verbose:
  async for chunk in await litellm.acompletion(
@@ -1790,67 +1634,6 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

  litellm.callbacks = events

- def _handle_ollama_model(self, response_text: str, tool_results: List[Any], messages: List[Dict], original_prompt: Union[str, List[Dict]]) -> Optional[Dict[str, Any]]:
- """
- Handle special Ollama model requirements when processing tool results.
-
- Args:
- response_text: The initial response text from the model
- tool_results: List of tool execution results
- messages: The conversation messages list
- original_prompt: The original user prompt
-
- Returns:
- Dict with follow-up parameters if Ollama needs special handling, None otherwise
- """
- if not self._is_ollama_provider() or not tool_results:
- return None
-
- # Check if the response is just a JSON tool call
- try:
- json_response = json.loads(response_text.strip() if response_text else "{}")
- if not (('name' in json_response or 'function' in json_response) and
- not any(word in response_text.lower() for word in ['summary', 'option', 'result', 'found'])):
- return None
-
- logging.debug("Detected Ollama returning only tool call JSON, preparing follow-up call to process results")
-
- # Extract the original user query from messages
- original_query = ""
- for msg in reversed(messages): # Look from the end to find the most recent user message
- if msg.get("role") == "user":
- content = msg.get("content", "")
- # Handle list content (multimodal)
- if isinstance(content, list):
- for item in content:
- if isinstance(item, dict) and item.get("type") == "text":
- original_query = item.get("text", "")
- break
- else:
- original_query = content
- if original_query:
- break
-
- # Create a shorter follow-up prompt with all tool results
- # If there's only one result, use it directly; otherwise combine them
- if len(tool_results) == 1:
- results_text = json.dumps(tool_results[0], indent=2)
- else:
- results_text = json.dumps(tool_results, indent=2)
-
- follow_up_prompt = f"Results:\n{results_text}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
- logging.debug(f"[OLLAMA_DEBUG] Original query extracted: {original_query}")
- logging.debug(f"[OLLAMA_DEBUG] Follow-up prompt: {follow_up_prompt[:200]}...")
-
- # Return parameters for follow-up call
- return {
- "follow_up_messages": [{"role": "user", "content": follow_up_prompt}],
- "original_prompt": original_prompt
- }
-
- except (json.JSONDecodeError, KeyError):
- # Not a JSON response or not a tool call format
- return None

  def _build_completion_params(self, **override_params) -> Dict[str, Any]:
  """Build parameters for litellm completion calls with all necessary config"""

{praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents/process/process.py
@@ -469,16 +469,18 @@ Subtask: {st.name}
  logging.debug(f"Task type: {task_to_check.task_type}")
  logging.debug(f"Task status before reset check: {task_to_check.status}")
  logging.debug(f"Task rerun: {getattr(task_to_check, 'rerun', True)}") # default to True if not set
+ logging.debug(f"Task async_execution: {task_to_check.async_execution}")

  if (getattr(task_to_check, 'rerun', True) and # Corrected condition - reset only if rerun is True (or default True)
  task_to_check.task_type != "loop" and # Removed "decision" from exclusion
  not any(t.task_type == "loop" and subtask_name.startswith(t.name + "_")
- for t in self.tasks.values())):
- logging.debug(f"=== Resetting non-loop, non-decision task {subtask_name} to 'not started' ===")
+ for t in self.tasks.values()) and
+ not task_to_check.async_execution): # Don't reset async parallel tasks
+ logging.debug(f"=== Resetting non-loop, non-decision, non-parallel task {subtask_name} to 'not started' ===")
  self.tasks[task_id].status = "not started"
  logging.debug(f"Task status after reset: {self.tasks[task_id].status}")
  else:
- logging.debug(f"=== Skipping reset for loop/decision/subtask or rerun=False: {subtask_name} ===")
+ logging.debug(f"=== Skipping reset for loop/decision/subtask/parallel or rerun=False: {subtask_name} ===")
  logging.debug(f"Keeping status as: {self.tasks[task_id].status}")

  # Handle loop progression
@@ -1099,16 +1101,18 @@ Subtask: {st.name}
  logging.debug(f"Task type: {task_to_check.task_type}")
  logging.debug(f"Task status before reset check: {task_to_check.status}")
  logging.debug(f"Task rerun: {getattr(task_to_check, 'rerun', True)}") # default to True if not set
+ logging.debug(f"Task async_execution: {task_to_check.async_execution}")

  if (getattr(task_to_check, 'rerun', True) and # Corrected condition - reset only if rerun is True (or default True)
  task_to_check.task_type != "loop" and # Removed "decision" from exclusion
  not any(t.task_type == "loop" and subtask_name.startswith(t.name + "_")
- for t in self.tasks.values())):
- logging.debug(f"=== Resetting non-loop, non-decision task {subtask_name} to 'not started' ===")
+ for t in self.tasks.values()) and
+ not task_to_check.async_execution): # Don't reset async parallel tasks
+ logging.debug(f"=== Resetting non-loop, non-decision, non-parallel task {subtask_name} to 'not started' ===")
  self.tasks[task_id].status = "not started"
  logging.debug(f"Task status after reset: {self.tasks[task_id].status}")
  else:
- logging.debug(f"=== Skipping reset for loop/decision/subtask or rerun=False: {subtask_name} ===")
+ logging.debug(f"=== Skipping reset for loop/decision/subtask/parallel or rerun=False: {subtask_name} ===")
  logging.debug(f"Keeping status as: {self.tasks[task_id].status}")


{praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/praisonaiagents.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: praisonaiagents
- Version: 0.0.127
+ Version: 0.0.128
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Python: >=3.10

{praisonaiagents-0.0.127 → praisonaiagents-0.0.128}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "praisonaiagents"
- version = "0.0.127"
+ version = "0.0.128"
  description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
  requires-python = ">=3.10"
  authors = [