praisonaiagents 0.0.130__tar.gz → 0.0.132__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81)
  1. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/PKG-INFO +1 -1
  2. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/agent/agent.py +95 -18
  3. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/agent/image_agent.py +12 -3
  4. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/agents/agents.py +32 -7
  5. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/guardrails/llm_guardrail.py +44 -2
  6. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/llm/llm.py +273 -44
  7. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/main.py +91 -45
  8. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/memory/memory.py +16 -6
  9. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/process/process.py +88 -4
  10. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/task/task.py +62 -6
  11. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents.egg-info/PKG-INFO +1 -1
  12. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents.egg-info/SOURCES.txt +4 -0
  13. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/pyproject.toml +1 -1
  14. praisonaiagents-0.0.132/tests/test_llm_self_reflection_direct.py +36 -0
  15. praisonaiagents-0.0.132/tests/test_self_reflection_comprehensive.py +117 -0
  16. praisonaiagents-0.0.132/tests/test_self_reflection_fix_simple.py +37 -0
  17. praisonaiagents-0.0.132/tests/test_self_reflection_fix_verification.py +51 -0
  18. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/README.md +0 -0
  19. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/__init__.py +0 -0
  20. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/agent/__init__.py +0 -0
  21. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/agent/handoff.py +0 -0
  22. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/agent/router_agent.py +0 -0
  23. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/agents/__init__.py +0 -0
  24. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/agents/autoagents.py +0 -0
  25. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/approval.py +0 -0
  26. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/guardrails/__init__.py +0 -0
  27. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/guardrails/guardrail_result.py +0 -0
  28. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/knowledge/__init__.py +0 -0
  29. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/knowledge/chunking.py +0 -0
  30. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/knowledge/knowledge.py +0 -0
  31. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/llm/__init__.py +0 -0
  32. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/llm/model_capabilities.py +0 -0
  33. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/llm/model_router.py +0 -0
  34. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/llm/openai_client.py +0 -0
  35. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/mcp/__init__.py +0 -0
  36. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/mcp/mcp.py +0 -0
  37. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/mcp/mcp_http_stream.py +0 -0
  38. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/mcp/mcp_sse.py +0 -0
  39. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/memory/__init__.py +0 -0
  40. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/process/__init__.py +0 -0
  41. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/session.py +0 -0
  42. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/task/__init__.py +0 -0
  43. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/telemetry/__init__.py +0 -0
  44. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/telemetry/integration.py +0 -0
  45. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/telemetry/telemetry.py +0 -0
  46. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/tools/README.md +0 -0
  47. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/tools/__init__.py +0 -0
  48. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/tools/arxiv_tools.py +0 -0
  49. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/tools/calculator_tools.py +0 -0
  50. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/tools/csv_tools.py +0 -0
  51. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/tools/duckdb_tools.py +0 -0
  52. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
  53. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/tools/excel_tools.py +0 -0
  54. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/tools/file_tools.py +0 -0
  55. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/tools/json_tools.py +0 -0
  56. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/tools/newspaper_tools.py +0 -0
  57. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/tools/pandas_tools.py +0 -0
  58. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/tools/python_tools.py +0 -0
  59. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/tools/searxng_tools.py +0 -0
  60. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/tools/shell_tools.py +0 -0
  61. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/tools/spider_tools.py +0 -0
  62. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/tools/test.py +0 -0
  63. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/tools/tools.py +0 -0
  64. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/tools/train/data/generatecot.py +0 -0
  65. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/tools/wikipedia_tools.py +0 -0
  66. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/tools/xml_tools.py +0 -0
  67. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/tools/yaml_tools.py +0 -0
  68. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents/tools/yfinance_tools.py +0 -0
  69. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents.egg-info/dependency_links.txt +0 -0
  70. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents.egg-info/requires.txt +0 -0
  71. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/praisonaiagents.egg-info/top_level.txt +0 -0
  72. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/setup.cfg +0 -0
  73. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/tests/test-graph-memory.py +0 -0
  74. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/tests/test.py +0 -0
  75. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/tests/test_fix_comprehensive.py +0 -0
  76. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/tests/test_handoff_compatibility.py +0 -0
  77. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/tests/test_http_stream_basic.py +0 -0
  78. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/tests/test_ollama_async_fix.py +0 -0
  79. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/tests/test_ollama_fix.py +0 -0
  80. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/tests/test_posthog_fixed.py +0 -0
  81. {praisonaiagents-0.0.130 → praisonaiagents-0.0.132}/tests/test_validation_feedback.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: praisonaiagents
- Version: 0.0.130
+ Version: 0.0.132
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Python: >=3.10
praisonaiagents/agent/agent.py
@@ -27,7 +27,8 @@ from ..main import (
      display_self_reflection,
      ReflectionOutput,
      adisplay_instruction,
-     approval_callback
+     approval_callback,
+     execute_sync_callback
  )
  import inspect
  import uuid
@@ -424,7 +425,13 @@ class Agent:
      # Otherwise, fall back to OpenAI environment/name
      else:
          self.llm = llm or os.getenv('OPENAI_MODEL_NAME', 'gpt-4o')
- self.tools = tools if tools else [] # Store original tools
+ # Handle tools parameter - ensure it's always a list
+ if callable(tools):
+     # If a single function/callable is passed, wrap it in a list
+     self.tools = [tools]
+ else:
+     # Handle all falsy values (None, False, 0, "", etc.) by defaulting to empty list
+     self.tools = tools or []
  self.function_calling_llm = function_calling_llm
  self.max_iter = max_iter
  self.max_rpm = max_rpm
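
Note: with this change an Agent accepts a bare callable for `tools` and wraps it in a list. A minimal sketch of what that enables, assuming a trivial demo tool (the `get_weather` function and the `instructions` text are illustrative, not part of the diff):

    from praisonaiagents import Agent

    def get_weather(city: str) -> str:
        """Toy tool: return a canned weather report for a city."""
        return f"It is sunny in {city}."

    # Before 0.0.132 this had to be tools=[get_weather]; a bare callable is now
    # normalized to [get_weather] inside Agent.__init__.
    agent = Agent(instructions="You answer weather questions.", tools=get_weather)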
@@ -1093,6 +1100,10 @@ Your Goal: {self.goal}"""
1093
1100
  execute_tool_fn=self.execute_tool,
1094
1101
  agent_name=self.name,
1095
1102
  agent_role=self.role,
1103
+ agent_tools=[t.__name__ for t in self.tools] if self.tools else None,
1104
+ task_name=task_name,
1105
+ task_description=task_description,
1106
+ task_id=task_id,
1096
1107
  reasoning_steps=reasoning_steps
1097
1108
  )
1098
1109
  else:
@@ -1109,6 +1120,10 @@ Your Goal: {self.goal}"""
1109
1120
  execute_tool_fn=self.execute_tool,
1110
1121
  agent_name=self.name,
1111
1122
  agent_role=self.role,
1123
+ agent_tools=[t.__name__ for t in self.tools] if self.tools else None,
1124
+ task_name=task_name,
1125
+ task_description=task_description,
1126
+ task_id=task_id,
1112
1127
  reasoning_steps=reasoning_steps
1113
1128
  )
1114
1129
  else:
@@ -1142,8 +1157,39 @@ Your Goal: {self.goal}"""
1142
1157
  except Exception as e:
1143
1158
  display_error(f"Error in chat completion: {e}")
1144
1159
  return None
1145
-
1146
- def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, stream=True):
1160
+
1161
+ def _execute_callback_and_display(self, prompt: str, response: str, generation_time: float):
1162
+ """Helper method to execute callbacks and display interaction.
1163
+
1164
+ This centralizes the logic for callback execution and display to avoid duplication.
1165
+ """
1166
+ # Always execute callbacks regardless of verbose setting (only when not using custom LLM)
1167
+ if not self._using_custom_llm:
1168
+ execute_sync_callback(
1169
+ 'interaction',
1170
+ message=prompt,
1171
+ response=response,
1172
+ markdown=self.markdown,
1173
+ generation_time=generation_time,
1174
+ agent_name=self.name,
1175
+ agent_role=self.role,
1176
+ agent_tools=[t.__name__ for t in self.tools] if self.tools else None,
1177
+ task_name=None, # Not available in this context
1178
+ task_description=None, # Not available in this context
1179
+ task_id=None # Not available in this context
1180
+ )
1181
+ # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
1182
+ if self.verbose and not self._using_custom_llm:
1183
+ display_interaction(prompt, response, markdown=self.markdown,
1184
+ generation_time=generation_time, console=self.console,
1185
+ agent_name=self.name,
1186
+ agent_role=self.role,
1187
+ agent_tools=[t.__name__ for t in self.tools] if self.tools else None,
1188
+ task_name=None, # Not available in this context
1189
+ task_description=None, # Not available in this context
1190
+ task_id=None) # Not available in this context
1191
+
1192
+ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, stream=True, task_name=None, task_description=None, task_id=None):
1147
1193
  # Log all parameter values when in debug mode
1148
1194
  if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
1149
1195
  param_info = {
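
Note: `chat()` now accepts optional `task_name`, `task_description`, and `task_id` keyword arguments, which are forwarded to callbacks and display helpers. A minimal sketch of a direct call with the new arguments (the agent setup and all values are placeholders; in normal use the orchestrator in agents.py supplies them from the Task):

    from praisonaiagents import Agent

    agent = Agent(instructions="You are a concise assistant.")

    # The three task_* arguments default to None, so existing callers are unaffected.
    reply = agent.chat(
        "Summarise the latest pipeline run",
        task_name="summary-task",
        task_description="Summarise the most recent pipeline logs",
        task_id="task-001",
    )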
@@ -1234,6 +1280,9 @@ Your Goal: {self.goal}"""
1234
1280
  agent_name=self.name,
1235
1281
  agent_role=self.role,
1236
1282
  agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in (tools if tools is not None else self.tools)],
1283
+ task_name=task_name,
1284
+ task_description=task_description,
1285
+ task_id=task_id,
1237
1286
  execute_tool_fn=self.execute_tool, # Pass tool execution function
1238
1287
  reasoning_steps=reasoning_steps,
1239
1288
  stream=stream # Pass the stream parameter from chat method
@@ -1321,25 +1370,30 @@ Your Goal: {self.goal}"""
1321
1370
  # Add to chat history and return raw response
1322
1371
  # User message already added before LLM call via _build_messages
1323
1372
  self.chat_history.append({"role": "assistant", "content": response_text})
1324
- # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
1325
- if self.verbose and not self._using_custom_llm:
1326
- display_interaction(original_prompt, response_text, markdown=self.markdown,
1327
- generation_time=time.time() - start_time, console=self.console)
1328
- return response_text
1373
+ # Apply guardrail validation even for JSON output
1374
+ try:
1375
+ validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
1376
+ # Execute callback after validation
1377
+ self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
1378
+ return validated_response
1379
+ except Exception as e:
1380
+ logging.error(f"Agent {self.name}: Guardrail validation failed for JSON output: {e}")
1381
+ # Rollback chat history on guardrail failure
1382
+ self.chat_history = self.chat_history[:chat_history_length]
1383
+ return None
1329
1384
 
1330
1385
  if not self.self_reflect:
1331
1386
  # User message already added before LLM call via _build_messages
1332
1387
  self.chat_history.append({"role": "assistant", "content": response_text})
1333
1388
  if self.verbose:
1334
1389
  logging.debug(f"Agent {self.name} final response: {response_text}")
1335
- # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
1336
- if self.verbose and not self._using_custom_llm:
1337
- display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
1338
1390
  # Return only reasoning content if reasoning_steps is True
1339
1391
  if reasoning_steps and hasattr(response.choices[0].message, 'reasoning_content'):
1340
1392
  # Apply guardrail to reasoning content
1341
1393
  try:
1342
1394
  validated_reasoning = self._apply_guardrail_with_retry(response.choices[0].message.reasoning_content, original_prompt, temperature, tools)
1395
+ # Execute callback after validation
1396
+ self._execute_callback_and_display(original_prompt, validated_reasoning, time.time() - start_time)
1343
1397
  return validated_reasoning
1344
1398
  except Exception as e:
1345
1399
  logging.error(f"Agent {self.name}: Guardrail validation failed for reasoning content: {e}")
@@ -1349,6 +1403,8 @@ Your Goal: {self.goal}"""
1349
1403
  # Apply guardrail to regular response
1350
1404
  try:
1351
1405
  validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
1406
+ # Execute callback after validation
1407
+ self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
1352
1408
  return validated_response
1353
1409
  except Exception as e:
1354
1410
  logging.error(f"Agent {self.name}: Guardrail validation failed: {e}")
@@ -1412,12 +1468,11 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
              display_self_reflection("Agent marked the response as satisfactory after meeting minimum reflections", console=self.console)
          # User message already added before LLM call via _build_messages
          self.chat_history.append({"role": "assistant", "content": response_text})
-         # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
-         if self.verbose and not self._using_custom_llm:
-             display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
          # Apply guardrail validation after satisfactory reflection
          try:
              validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+             # Execute callback after validation
+             self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
              return validated_response
          except Exception as e:
              logging.error(f"Agent {self.name}: Guardrail validation failed after reflection: {e}")
@@ -1431,12 +1486,11 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
              display_self_reflection("Maximum reflection count reached, returning current response", console=self.console)
          # User message already added before LLM call via _build_messages
          self.chat_history.append({"role": "assistant", "content": response_text})
-         # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
-         if self.verbose and not self._using_custom_llm:
-             display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
          # Apply guardrail validation after max reflections
          try:
              validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+             # Execute callback after validation
+             self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
              return validated_response
          except Exception as e:
              logging.error(f"Agent {self.name}: Guardrail validation failed after max reflections: {e}")
@@ -1551,6 +1605,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
      agent_name=self.name,
      agent_role=self.role,
      agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in (tools if tools is not None else self.tools)],
+     task_name=task_name,
+     task_description=task_description,
+     task_id=task_id,
      execute_tool_fn=self.execute_tool_async,
      reasoning_steps=reasoning_steps
  )
@@ -1869,6 +1926,26 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
          """Start the agent with a prompt. This is a convenience method that wraps chat()."""
          return self.chat(prompt, **kwargs)

+     def execute(self, task, context=None):
+         """Execute a task synchronously - backward compatibility method"""
+         if hasattr(task, 'description'):
+             prompt = task.description
+         elif isinstance(task, str):
+             prompt = task
+         else:
+             prompt = str(task)
+         return self.chat(prompt)
+
+     async def aexecute(self, task, context=None):
+         """Execute a task asynchronously - backward compatibility method"""
+         if hasattr(task, 'description'):
+             prompt = task.description
+         elif isinstance(task, str):
+             prompt = task
+         else:
+             prompt = str(task)
+         return await self.achat(prompt)
+
      async def execute_tool_async(self, function_name: str, arguments: Dict[str, Any]) -> Any:
          """Async version of execute_tool"""
          try:
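
Note: `execute()` and `aexecute()` are thin backward-compatibility wrappers around `chat()`/`achat()` that accept either a plain string or any object with a `description` attribute. A minimal usage sketch (the agent setup and the `SimpleTask` class are illustrative assumptions):

    import asyncio
    from praisonaiagents import Agent

    agent = Agent(instructions="You are a research assistant.")

    # A plain string is used as the prompt directly.
    print(agent.execute("List three uses of vector databases"))

    # Objects exposing .description (e.g. Task instances) are also accepted.
    class SimpleTask:
        description = "List three uses of vector databases"

    print(asyncio.run(agent.aexecute(SimpleTask())))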
praisonaiagents/agent/image_agent.py
@@ -144,8 +144,17 @@ class ImageAgent(Agent):
          config = self.image_config.dict(exclude_none=True)
          config.update(kwargs)

-         # Use llm parameter as the model
-         config['model'] = self.llm
+         # Get the model name robustly from the parent Agent's property
+         model_info = self.llm_model
+         model_name = model_info.model if hasattr(model_info, 'model') else str(model_info)
+
+         # Use the model name in config
+         config['model'] = model_name
+
+         # Check if we're using a Gemini model and remove unsupported parameters
+         if 'gemini' in model_name.lower():
+             # Gemini models don't support response_format parameter
+             config.pop('response_format', None)

          with Progress(
              SpinnerColumn(),
@@ -154,7 +163,7 @@
          ) as progress:
              try:
                  # Add a task for image generation
-                 task = progress.add_task(f"[cyan]Generating image with {self.llm}...", total=None)
+                 task = progress.add_task(f"[cyan]Generating image with {model_name}...", total=None)

                  # Use litellm's image generation
                  response = self.litellm(
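
Note: the ImageAgent change resolves the model name once and strips `response_format` before calling litellm when the model looks like a Gemini model. A standalone sketch of that clean-up pattern, outside the class (the model identifier and config values are illustrative only):

    # Illustrative config; only the 'gemini' check mirrors the diff above.
    config = {"model": "gemini/imagen-3", "response_format": "b64_json", "size": "1024x1024"}

    model_name = config["model"]
    if "gemini" in model_name.lower():
        # Gemini image models reject response_format, so drop it defensively.
        config.pop("response_format", None)

    print(config)  # {'model': 'gemini/imagen-3', 'size': '1024x1024'}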
praisonaiagents/agents/agents.py
@@ -491,18 +491,35 @@ Context:
              tasks_to_run = []

                  # Run sync task in an executor to avoid blocking the event loop
-                 loop = asyncio.get_event_loop()
+                 loop = asyncio.get_running_loop()
                  await loop.run_in_executor(None, self.run_task, task_id)

              if tasks_to_run:
                  await asyncio.gather(*tasks_to_run)

          elif self.process == "sequential":
+             async_tasks_to_run = []
+
+             async def flush_async_tasks():
+                 """Execute all pending async tasks"""
+                 nonlocal async_tasks_to_run
+                 if async_tasks_to_run:
+                     await asyncio.gather(*async_tasks_to_run)
+                     async_tasks_to_run = []
+
              async for task_id in process.asequential():
                  if self.tasks[task_id].async_execution:
-                     await self.arun_task(task_id)
+                     # Collect async tasks to run in parallel
+                     async_tasks_to_run.append(self.arun_task(task_id))
                  else:
-                     self.run_task(task_id)
+                     # Before running a sync task, execute all pending async tasks
+                     await flush_async_tasks()
+                     # Run sync task in an executor to avoid blocking the event loop
+                     loop = asyncio.get_running_loop()
+                     await loop.run_in_executor(None, self.run_task, task_id)
+
+             # Execute any remaining async tasks at the end
+             await flush_async_tasks()
          elif self.process == "hierarchical":
              async for task_id in process.ahierarchical():
                  if isinstance(task_id, Task):
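
Note: the sequential branch now collects consecutive async tasks and awaits them as one `asyncio.gather` batch, flushing the batch before any sync task runs in an executor. A self-contained sketch of the same pattern, independent of the PraisonAI classes (task names and the sleep are illustrative):

    import asyncio

    async def async_task(name: str) -> str:
        await asyncio.sleep(0.1)   # stand-in for an async agent call
        return f"{name} done"

    def sync_task(name: str) -> str:
        return f"{name} done"

    async def run_sequence(tasks):
        pending = []  # consecutive async tasks are collected here

        async def flush():
            nonlocal pending
            if pending:
                print(await asyncio.gather(*pending))  # run the batch in parallel
                pending = []

        loop = asyncio.get_running_loop()
        for kind, name in tasks:
            if kind == "async":
                pending.append(async_task(name))
            else:
                await flush()  # finish queued async work before the sync step
                print(await loop.run_in_executor(None, sync_task, name))
        await flush()  # drain anything left at the end

    asyncio.run(run_sequence([("async", "a1"), ("async", "a2"), ("sync", "s1"), ("async", "a3")]))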
@@ -510,7 +527,9 @@ Context:
                  if self.tasks[task_id].async_execution:
                      await self.arun_task(task_id)
                  else:
-                     self.run_task(task_id)
+                     # Run sync task in an executor to avoid blocking the event loop
+                     loop = asyncio.get_running_loop()
+                     await loop.run_in_executor(None, self.run_task, task_id)

      async def astart(self, content=None, return_dict=False, **kwargs):
          """Async version of start method
@@ -670,7 +689,10 @@ Context:
                      _get_multimodal_message(task_prompt, task.images),
                      tools=task.tools,
                      output_json=task.output_json,
-                     output_pydantic=task.output_pydantic
+                     output_pydantic=task.output_pydantic,
+                     task_name=task.name,
+                     task_description=task.description,
+                     task_id=task_id
                  )
              else:
                  agent_output = executor_agent.chat(
@@ -679,6 +701,9 @@ Context:
                      output_json=task.output_json,
                      output_pydantic=task.output_pydantic,
                      stream=self.stream,
+                     task_name=task.name,
+                     task_description=task.description,
+                     task_id=task_id
                  )

              if agent_output:
@@ -1116,7 +1141,7 @@ Context:
              response = await agent_instance.achat(current_input)
          else:
              # Run sync function in a thread to avoid blocking
-             loop = asyncio.get_event_loop()
+             loop = asyncio.get_running_loop()
              # Correctly pass current_input to the lambda for closure
              response = await loop.run_in_executor(None, lambda ci=current_input: agent_instance.chat(ci))

@@ -1271,7 +1296,7 @@ Context:
          if hasattr(agent_instance, 'achat') and asyncio.iscoroutinefunction(agent_instance.achat):
              response = await agent_instance.achat(current_input, tools=agent_instance.tools)
          elif hasattr(agent_instance, 'chat'): # Fallback to sync chat if achat not suitable
-             loop = asyncio.get_event_loop()
+             loop = asyncio.get_running_loop()
              response = await loop.run_in_executor(None, lambda ci=current_input: agent_instance.chat(ci, tools=agent_instance.tools))
          else:
              logging.warning(f"Agent {agent_instance.name} has no suitable chat or achat method.")
praisonaiagents/guardrails/llm_guardrail.py
@@ -19,12 +19,54 @@ class LLMGuardrail:

          Args:
              description: Natural language description of what to validate
-             llm: The LLM instance to use for validation
+             llm: The LLM instance to use for validation (can be string or LLM instance)
          """
          self.description = description
-         self.llm = llm
+         self.llm = self._initialize_llm(llm)
          self.logger = logging.getLogger(__name__)

+     def _initialize_llm(self, llm: Any) -> Any:
+         """Initialize the LLM instance from string identifier or existing instance.
+
+         Args:
+             llm: String identifier, LLM instance, or None
+
+         Returns:
+             LLM instance or None
+         """
+         # Local import to avoid circular dependencies
+         def _get_llm_class():
+             from ..llm.llm import LLM
+             return LLM
+
+         if llm is None:
+             return None
+
+         # If it's already an LLM instance, return as-is
+         if hasattr(llm, 'chat') or hasattr(llm, 'get_response') or callable(llm):
+             return llm
+
+         # If it's a string, convert to LLM instance
+         if isinstance(llm, str):
+             try:
+                 # Handle string identifiers (both provider/model and simple names)
+                 return _get_llm_class()(model=llm)
+             except Exception as e:
+                 self.logger.error(f"Failed to initialize LLM from string '{llm}': {str(e)}")
+                 return None
+
+         # If it's a dict, pass parameters to LLM
+         if isinstance(llm, dict) and "model" in llm:
+             try:
+                 return _get_llm_class()(**llm)
+             except Exception as e:
+                 self.logger.error(f"Failed to initialize LLM from dict: {str(e)}")
+                 return None
+
+         # Unknown type
+         self.logger.warning(f"Unknown LLM type: {type(llm)}, treating as-is")
+         return llm
+
      def __call__(self, task_output: TaskOutput) -> Tuple[bool, Union[str, TaskOutput]]:
          """Validate the task output using the LLM.
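
Note: with `_initialize_llm`, an LLMGuardrail can be constructed from a plain model string (or a dict containing a `model` key) as well as an existing LLM instance. A minimal sketch (the model identifier and validation description are placeholders; calling the guardrail still requires a real TaskOutput from a run):

    from praisonaiagents.guardrails.llm_guardrail import LLMGuardrail

    guardrail = LLMGuardrail(
        description="The answer must cite at least one source",
        llm="gpt-4o-mini",   # example identifier; converted to an LLM instance internally
    )

    # guardrail(task_output) returns a (passed, feedback_or_output) tuple.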