praisonaiagents 0.0.131__tar.gz → 0.0.132__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81)
  1. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/PKG-INFO +1 -1
  2. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/agent/agent.py +20 -0
  3. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/guardrails/llm_guardrail.py +44 -2
  4. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/llm/llm.py +75 -58
  5. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/main.py +39 -42
  6. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/process/process.py +88 -4
  7. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/task/task.py +62 -6
  8. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents.egg-info/PKG-INFO +1 -1
  9. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/pyproject.toml +1 -1
  10. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/README.md +0 -0
  11. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/__init__.py +0 -0
  12. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/agent/__init__.py +0 -0
  13. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/agent/handoff.py +0 -0
  14. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/agent/image_agent.py +0 -0
  15. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/agent/router_agent.py +0 -0
  16. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/agents/__init__.py +0 -0
  17. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/agents/agents.py +0 -0
  18. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/agents/autoagents.py +0 -0
  19. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/approval.py +0 -0
  20. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/guardrails/__init__.py +0 -0
  21. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/guardrails/guardrail_result.py +0 -0
  22. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/knowledge/__init__.py +0 -0
  23. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/knowledge/chunking.py +0 -0
  24. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/knowledge/knowledge.py +0 -0
  25. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/llm/__init__.py +0 -0
  26. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/llm/model_capabilities.py +0 -0
  27. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/llm/model_router.py +0 -0
  28. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/llm/openai_client.py +0 -0
  29. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/mcp/__init__.py +0 -0
  30. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/mcp/mcp.py +0 -0
  31. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/mcp/mcp_http_stream.py +0 -0
  32. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/mcp/mcp_sse.py +0 -0
  33. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/memory/__init__.py +0 -0
  34. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/memory/memory.py +0 -0
  35. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/process/__init__.py +0 -0
  36. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/session.py +0 -0
  37. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/task/__init__.py +0 -0
  38. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/telemetry/__init__.py +0 -0
  39. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/telemetry/integration.py +0 -0
  40. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/telemetry/telemetry.py +0 -0
  41. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/tools/README.md +0 -0
  42. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/tools/__init__.py +0 -0
  43. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/tools/arxiv_tools.py +0 -0
  44. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/tools/calculator_tools.py +0 -0
  45. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/tools/csv_tools.py +0 -0
  46. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/tools/duckdb_tools.py +0 -0
  47. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
  48. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/tools/excel_tools.py +0 -0
  49. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/tools/file_tools.py +0 -0
  50. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/tools/json_tools.py +0 -0
  51. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/tools/newspaper_tools.py +0 -0
  52. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/tools/pandas_tools.py +0 -0
  53. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/tools/python_tools.py +0 -0
  54. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/tools/searxng_tools.py +0 -0
  55. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/tools/shell_tools.py +0 -0
  56. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/tools/spider_tools.py +0 -0
  57. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/tools/test.py +0 -0
  58. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/tools/tools.py +0 -0
  59. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/tools/train/data/generatecot.py +0 -0
  60. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/tools/wikipedia_tools.py +0 -0
  61. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/tools/xml_tools.py +0 -0
  62. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/tools/yaml_tools.py +0 -0
  63. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents/tools/yfinance_tools.py +0 -0
  64. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents.egg-info/SOURCES.txt +0 -0
  65. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents.egg-info/dependency_links.txt +0 -0
  66. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents.egg-info/requires.txt +0 -0
  67. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/praisonaiagents.egg-info/top_level.txt +0 -0
  68. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/setup.cfg +0 -0
  69. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/tests/test-graph-memory.py +0 -0
  70. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/tests/test.py +0 -0
  71. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/tests/test_fix_comprehensive.py +0 -0
  72. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/tests/test_handoff_compatibility.py +0 -0
  73. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/tests/test_http_stream_basic.py +0 -0
  74. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/tests/test_llm_self_reflection_direct.py +0 -0
  75. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/tests/test_ollama_async_fix.py +0 -0
  76. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/tests/test_ollama_fix.py +0 -0
  77. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/tests/test_posthog_fixed.py +0 -0
  78. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/tests/test_self_reflection_comprehensive.py +0 -0
  79. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/tests/test_self_reflection_fix_simple.py +0 -0
  80. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/tests/test_self_reflection_fix_verification.py +0 -0
  81. {praisonaiagents-0.0.131 → praisonaiagents-0.0.132}/tests/test_validation_feedback.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: praisonaiagents
- Version: 0.0.131
+ Version: 0.0.132
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Python: >=3.10
@@ -1926,6 +1926,26 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  """Start the agent with a prompt. This is a convenience method that wraps chat()."""
  return self.chat(prompt, **kwargs)

+ def execute(self, task, context=None):
+ """Execute a task synchronously - backward compatibility method"""
+ if hasattr(task, 'description'):
+ prompt = task.description
+ elif isinstance(task, str):
+ prompt = task
+ else:
+ prompt = str(task)
+ return self.chat(prompt)
+
+ async def aexecute(self, task, context=None):
+ """Execute a task asynchronously - backward compatibility method"""
+ if hasattr(task, 'description'):
+ prompt = task.description
+ elif isinstance(task, str):
+ prompt = task
+ else:
+ prompt = str(task)
+ return await self.achat(prompt)
+
  async def execute_tool_async(self, function_name: str, arguments: Dict[str, Any]) -> Any:
  """Async version of execute_tool"""
  try:
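
The hunk above (praisonaiagents/agent/agent.py) restores Agent.execute() and Agent.aexecute() as thin backward-compatibility wrappers around chat() and achat(). A minimal usage sketch; the Agent configuration and prompt strings are illustrative assumptions, not taken from the diff:

    import asyncio
    from praisonaiagents import Agent

    agent = Agent(instructions="You are a helpful assistant")  # illustrative setup

    # Objects with a .description attribute, plain strings, and anything else
    # (which is str()-ed) are all accepted and forwarded to chat()/achat()
    print(agent.execute("Summarise the changes in release 0.0.132"))
    print(asyncio.run(agent.aexecute("Summarise the changes in release 0.0.132")))

The context parameter is accepted but unused by both wrappers, so older call sites that pass it keep working.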
@@ -19,12 +19,54 @@ class LLMGuardrail:

  Args:
  description: Natural language description of what to validate
- llm: The LLM instance to use for validation
+ llm: The LLM instance to use for validation (can be string or LLM instance)
  """
  self.description = description
- self.llm = llm
+ self.llm = self._initialize_llm(llm)
  self.logger = logging.getLogger(__name__)

+ def _initialize_llm(self, llm: Any) -> Any:
+ """Initialize the LLM instance from string identifier or existing instance.
+
+ Args:
+ llm: String identifier, LLM instance, or None
+
+ Returns:
+ LLM instance or None
+ """
+ # Local import to avoid circular dependencies
+ def _get_llm_class():
+ from ..llm.llm import LLM
+ return LLM
+
+ if llm is None:
+ return None
+
+ # If it's already an LLM instance, return as-is
+ if hasattr(llm, 'chat') or hasattr(llm, 'get_response') or callable(llm):
+ return llm
+
+ # If it's a string, convert to LLM instance
+ if isinstance(llm, str):
+ try:
+ # Handle string identifiers (both provider/model and simple names)
+ return _get_llm_class()(model=llm)
+ except Exception as e:
+ self.logger.error(f"Failed to initialize LLM from string '{llm}': {str(e)}")
+ return None
+
+ # If it's a dict, pass parameters to LLM
+ if isinstance(llm, dict) and "model" in llm:
+ try:
+ return _get_llm_class()(**llm)
+ except Exception as e:
+ self.logger.error(f"Failed to initialize LLM from dict: {str(e)}")
+ return None
+
+ # Unknown type
+ self.logger.warning(f"Unknown LLM type: {type(llm)}, treating as-is")
+ return llm
+
  def __call__(self, task_output: TaskOutput) -> Tuple[bool, Union[str, TaskOutput]]:
  """Validate the task output using the LLM.

@@ -744,18 +744,9 @@ class LLM:
  response_text = resp["choices"][0]["message"]["content"]
  final_response = resp

- # Always execute callbacks regardless of verbose setting
+ # Execute callbacks and display based on verbose setting
  generation_time_val = time.time() - current_time
- interaction_displayed = False
-
  response_content = f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}" if reasoning_content else response_text
- execute_sync_callback(
- 'interaction',
- message=original_prompt,
- response=response_content,
- markdown=markdown,
- generation_time=generation_time_val
- )

  # Optionally display reasoning if present
  if verbose and reasoning_content and not interaction_displayed:
@@ -763,7 +754,7 @@ class LLM:
  original_prompt,
  f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
  markdown=markdown,
- generation_time=time.time() - current_time,
+ generation_time=generation_time_val,
  console=console,
  agent_name=agent_name,
  agent_role=agent_role,
@@ -773,12 +764,13 @@ class LLM:
  task_id=task_id
  )
  interaction_displayed = True
+ callback_executed = True
  elif verbose and not interaction_displayed:
  display_interaction(
  original_prompt,
  response_text,
  markdown=markdown,
- generation_time=time.time() - current_time,
+ generation_time=generation_time_val,
  console=console,
  agent_name=agent_name,
  agent_role=agent_role,
@@ -788,6 +780,17 @@ class LLM:
  task_id=task_id
  )
  interaction_displayed = True
+ callback_executed = True
+ elif not callback_executed:
+ # Only execute callback if display_interaction hasn't been called (which would trigger callbacks internally)
+ execute_sync_callback(
+ 'interaction',
+ message=original_prompt,
+ response=response_content,
+ markdown=markdown,
+ generation_time=generation_time_val
+ )
+ callback_executed = True

  # Otherwise do the existing streaming approach
  else:
@@ -847,14 +850,16 @@ class LLM:

  response_text = response_text.strip() if response_text else ""

- # Always execute callbacks after streaming completes
- execute_sync_callback(
- 'interaction',
- message=original_prompt,
- response=response_text,
- markdown=markdown,
- generation_time=time.time() - current_time
- )
+ # Execute callbacks after streaming completes (only if not verbose, since verbose will call display_interaction later)
+ if not verbose and not callback_executed:
+ execute_sync_callback(
+ 'interaction',
+ message=original_prompt,
+ response=response_text,
+ markdown=markdown,
+ generation_time=time.time() - current_time
+ )
+ callback_executed = True


  # Create a mock final_response with the captured data
@@ -881,18 +886,9 @@ class LLM:
  )
  response_text = final_response["choices"][0]["message"]["content"]

- # Always execute callbacks regardless of verbose setting
- execute_sync_callback(
- 'interaction',
- message=original_prompt,
- response=response_text,
- markdown=markdown,
- generation_time=time.time() - current_time
- )
-
-
+ # Execute callbacks and display based on verbose setting
  if verbose and not interaction_displayed:
- # Display the complete response at once
+ # Display the complete response at once (this will trigger callbacks internally)
  display_interaction(
  original_prompt,
  response_text,
@@ -907,6 +903,17 @@ class LLM:
  task_id=task_id
  )
  interaction_displayed = True
+ callback_executed = True
+ elif not callback_executed:
+ # Only execute callback if display_interaction hasn't been called
+ execute_sync_callback(
+ 'interaction',
+ message=original_prompt,
+ response=response_text,
+ markdown=markdown,
+ generation_time=time.time() - current_time
+ )
+ callback_executed = True

  tool_calls = final_response["choices"][0]["message"].get("tool_calls")

@@ -1010,16 +1017,8 @@ class LLM:
  return final_response_text

  # No tool calls were made in this iteration, return the response
- # Always execute callbacks regardless of verbose setting
  generation_time_val = time.time() - start_time
  response_content = f"Reasoning:\n{stored_reasoning_content}\n\nAnswer:\n{response_text}" if stored_reasoning_content else response_text
- execute_sync_callback(
- 'interaction',
- message=original_prompt,
- response=response_content,
- markdown=markdown,
- generation_time=generation_time_val
- )

  if verbose and not interaction_displayed:
  # If we have stored reasoning content from tool execution, display it
@@ -1028,7 +1027,7 @@ class LLM:
  original_prompt,
  f"Reasoning:\n{stored_reasoning_content}\n\nAnswer:\n{response_text}",
  markdown=markdown,
- generation_time=time.time() - start_time,
+ generation_time=generation_time_val,
  console=console,
  agent_name=agent_name,
  agent_role=agent_role,
@@ -1042,7 +1041,7 @@ class LLM:
  original_prompt,
  response_text,
  markdown=markdown,
- generation_time=time.time() - start_time,
+ generation_time=generation_time_val,
  console=console,
  agent_name=agent_name,
  agent_role=agent_role,
@@ -1052,6 +1051,17 @@ class LLM:
  task_id=task_id
  )
  interaction_displayed = True
+ callback_executed = True
+ elif not callback_executed:
+ # Only execute callback if display_interaction hasn't been called
+ execute_sync_callback(
+ 'interaction',
+ message=original_prompt,
+ response=response_content,
+ markdown=markdown,
+ generation_time=generation_time_val
+ )
+ callback_executed = True

  response_text = response_text.strip() if response_text else ""

@@ -1063,26 +1073,16 @@ class LLM:
  if output_json or output_pydantic:
  self.chat_history.append({"role": "user", "content": original_prompt})
  self.chat_history.append({"role": "assistant", "content": response_text})
- # Always execute callbacks regardless of verbose setting
- if not interaction_displayed:
- execute_sync_callback(
- 'interaction',
- message=original_prompt,
- response=response_text,
- markdown=markdown,
- generation_time=time.time() - start_time
- )
+
  if verbose and not interaction_displayed:
  display_interaction(original_prompt, response_text, markdown=markdown,
  generation_time=time.time() - start_time, console=console,
  agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
  task_name=task_name, task_description=task_description, task_id=task_id)
  interaction_displayed = True
- return response_text
-
- if not self_reflect:
- # Always execute callbacks regardless of verbose setting
- if not interaction_displayed:
+ callback_executed = True
+ elif not callback_executed:
+ # Only execute callback if display_interaction hasn't been called
  execute_sync_callback(
  'interaction',
  message=original_prompt,
@@ -1090,13 +1090,28 @@ class LLM:
  markdown=markdown,
  generation_time=time.time() - start_time
  )
+ callback_executed = True
+ return response_text

+ if not self_reflect:
  if verbose and not interaction_displayed:
  display_interaction(original_prompt, response_text, markdown=markdown,
  generation_time=time.time() - start_time, console=console,
  agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
  task_name=task_name, task_description=task_description, task_id=task_id)
  interaction_displayed = True
+ callback_executed = True
+ elif not callback_executed:
+ # Only execute callback if display_interaction hasn't been called
+ execute_sync_callback(
+ 'interaction',
+ message=original_prompt,
+ response=response_text,
+ markdown=markdown,
+ generation_time=time.time() - start_time
+ )
+ callback_executed = True
+
  # Return reasoning content if reasoning_steps is True
  if reasoning_steps and stored_reasoning_content:
  return stored_reasoning_content
@@ -1401,6 +1416,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

  start_time = time.time()
  reflection_count = 0
+ callback_executed = False # Track if callback has been executed for this interaction
  interaction_displayed = False # Track if interaction has been displayed

  # Format tools for LiteLLM using the shared helper
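
The llm.py hunks above all serve one fix: a registered 'interaction' callback used to fire once directly from the response loop and again inside display_interaction() when verbose output was on. The new callback_executed flag (initialised alongside interaction_displayed in the hunk directly above) guarantees a single invocation per response. A sketch of a callback that can observe this; it assumes register_display_callback is importable from praisonaiagents as in the package's callback documentation, and the agent setup is illustrative:

    from praisonaiagents import Agent, register_display_callback

    calls = []

    def count_interactions(message=None, response=None, **kwargs):
        # With 0.0.132 this should append one entry per LLM response,
        # whether or not verbose display is enabled
        calls.append((message, response))

    register_display_callback('interaction', count_interactions)

    Agent(instructions="You are a helpful assistant", verbose=True).start("Say hello")
    print(len(calls))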
@@ -2031,10 +2047,11 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  output_json = override_params.get('output_json')
  output_pydantic = override_params.get('output_pydantic')

+ # Always remove these from params as they're not native litellm parameters
+ params.pop('output_json', None)
+ params.pop('output_pydantic', None)
+
  if output_json or output_pydantic:
- # Always remove these from params as they're not native litellm parameters
- params.pop('output_json', None)
- params.pop('output_pydantic', None)

  # Check if this is a Gemini model that supports native structured outputs
  if self._is_gemini_model():
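
This hunk hoists the params.pop() calls out of the conditional so output_json and output_pydantic never leak into the native LiteLLM parameters, even when neither is set. For orientation, a hedged sketch of the caller-side feature these keys drive; it assumes the wrapper's get_response() accepts output_pydantic as an override (which the surrounding override_params handling suggests), and the model name and schema are made up for the example:

    from pydantic import BaseModel
    from praisonaiagents.llm.llm import LLM

    class ReleaseNote(BaseModel):
        version: str
        summary: str

    llm = LLM(model="gemini/gemini-1.5-flash")  # illustrative model choice
    # output_pydantic is consumed by the wrapper (Gemini models get a native
    # structured-output request) and is stripped before litellm.completion()
    text = llm.get_response("Summarise release 0.0.132", output_pydantic=ReleaseNote)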
@@ -159,35 +159,20 @@ def display_interaction(message, response, markdown=True, generation_time=None,
  message = _clean_display_content(str(message))
  response = _clean_display_content(str(response))

-
- # Execute synchronous callback if registered
- if 'interaction' in sync_display_callbacks:
- callback = sync_display_callbacks['interaction']
- import inspect
- sig = inspect.signature(callback)
-
- all_kwargs = {
- 'message': message,
- 'response': response,
- 'markdown': markdown,
- 'generation_time': generation_time,
- 'agent_name': agent_name,
- 'agent_role': agent_role,
- 'agent_tools': agent_tools,
- 'task_name': task_name,
- 'task_description': task_description,
- 'task_id': task_id
- }
-
- # Filter kwargs to what the callback accepts to maintain backward compatibility
- if any(p.kind == inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values()):
- # Callback accepts **kwargs, so pass all arguments
- supported_kwargs = all_kwargs
- else:
- # Only pass arguments that the callback signature supports
- supported_kwargs = {k: v for k, v in all_kwargs.items() if k in sig.parameters}
-
- callback(**supported_kwargs)
+ # Execute synchronous callbacks
+ execute_sync_callback(
+ 'interaction',
+ message=message,
+ response=response,
+ markdown=markdown,
+ generation_time=generation_time,
+ agent_name=agent_name,
+ agent_role=agent_role,
+ agent_tools=agent_tools,
+ task_name=task_name,
+ task_description=task_description,
+ task_id=task_id
+ )
  # Rest of the display logic...
  if generation_time:
  console.print(Text(f"Response generated in {generation_time:.1f}s", style="dim"))
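
display_interaction() previously looked up the registered callback itself and filtered keyword arguments against its signature; it now delegates to execute_sync_callback with the full set of agent and task kwargs. A sketch of two callback shapes that should both keep working, assuming execute_sync_callback preserves the old behaviour of only passing the keyword arguments a callback declares (that filtering is no longer visible in this hunk):

    from praisonaiagents import register_display_callback

    # Narrow signature: extra kwargs such as agent_name are expected to be filtered out
    def log_interaction(message, response):
        print(f"PROMPT: {message!r} -> {response!r}")

    # Wide signature: receives the agent/task metadata as well
    def log_everything(**kwargs):
        print(kwargs.get("agent_name"), kwargs.get("task_name"), kwargs.get("generation_time"))

    register_display_callback('interaction', log_interaction)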
@@ -206,6 +191,9 @@ def display_self_reflection(message: str, console=None):
  console = Console()
  message = _clean_display_content(str(message))

+ # Execute synchronous callbacks
+ execute_sync_callback('self_reflection', message=message)
+
  console.print(Panel.fit(Text(message, style="bold yellow"), title="Self Reflection", border_style="magenta"))

  def display_instruction(message: str, console=None, agent_name: str = None, agent_role: str = None, agent_tools: List[str] = None):
@@ -215,6 +203,9 @@ def display_instruction(message: str, console=None, agent_name: str = None, agen
  console = Console()
  message = _clean_display_content(str(message))

+ # Execute synchronous callbacks
+ execute_sync_callback('instruction', message=message, agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools)
+
  # Display agent info if available
  if agent_name:
  agent_info = f"[bold #FF9B9B]👤 Agent:[/] [#FFE5E5]{agent_name}[/]"
@@ -239,6 +230,9 @@ def display_tool_call(message: str, console=None):
  message = _clean_display_content(str(message))
  logging.debug(f"Cleaned message in display_tool_call: {repr(message)}")

+ # Execute synchronous callbacks
+ execute_sync_callback('tool_call', message=message)
+
  console.print(Panel.fit(Text(message, style="bold cyan"), title="Tool Call", border_style="green"))

  def display_error(message: str, console=None):
@@ -248,6 +242,9 @@ def display_error(message: str, console=None):
  console = Console()
  message = _clean_display_content(str(message))

+ # Execute synchronous callbacks
+ execute_sync_callback('error', message=message)
+
  console.print(Panel.fit(Text(message, style="bold red"), title="Error", border_style="red"))
  error_logs.append(message)

@@ -263,6 +260,9 @@ def display_generating(content: str = "", start_time: Optional[float] = None):

  content = _clean_display_content(str(content))

+ # Execute synchronous callbacks
+ execute_sync_callback('generating', content=content, elapsed_time=elapsed_str.strip() if elapsed_str else None)
+
  return Panel(Markdown(content), title=f"Generating...{elapsed_str}", border_style="green")

  # Async versions with 'a' prefix
@@ -312,8 +312,8 @@ async def adisplay_self_reflection(message: str, console=None):
  console = Console()
  message = _clean_display_content(str(message))

- if 'self_reflection' in async_display_callbacks:
- await async_display_callbacks['self_reflection'](message=message)
+ # Execute callbacks
+ await execute_callback('self_reflection', message=message)

  console.print(Panel.fit(Text(message, style="bold yellow"), title="Self Reflection", border_style="magenta"))

@@ -325,8 +325,8 @@ async def adisplay_instruction(message: str, console=None, agent_name: str = Non
  console = Console()
  message = _clean_display_content(str(message))

- if 'instruction' in async_display_callbacks:
- await async_display_callbacks['instruction'](message=message)
+ # Execute callbacks
+ await execute_callback('instruction', message=message, agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools)

  # Display agent info if available
  if agent_name:
@@ -353,8 +353,8 @@ async def adisplay_tool_call(message: str, console=None):
  message = _clean_display_content(str(message))
  logging.debug(f"Cleaned message in adisplay_tool_call: {repr(message)}")

- if 'tool_call' in async_display_callbacks:
- await async_display_callbacks['tool_call'](message=message)
+ # Execute callbacks
+ await execute_callback('tool_call', message=message)

  console.print(Panel.fit(Text(message, style="bold cyan"), title="Tool Call", border_style="green"))

@@ -366,8 +366,8 @@ async def adisplay_error(message: str, console=None):
  console = Console()
  message = _clean_display_content(str(message))

- if 'error' in async_display_callbacks:
- await async_display_callbacks['error'](message=message)
+ # Execute callbacks
+ await execute_callback('error', message=message)

  console.print(Panel.fit(Text(message, style="bold red"), title="Error", border_style="red"))
  error_logs.append(message)
@@ -385,11 +385,8 @@ async def adisplay_generating(content: str = "", start_time: Optional[float] = N

  content = _clean_display_content(str(content))

- if 'generating' in async_display_callbacks:
- await async_display_callbacks['generating'](
- content=content,
- elapsed_time=elapsed_str.strip() if elapsed_str else None
- )
+ # Execute callbacks
+ await execute_callback('generating', content=content, elapsed_time=elapsed_str.strip() if elapsed_str else None)

  return Panel(Markdown(content), title=f"Generating...{elapsed_str}", border_style="green")
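
The async display helpers above now route through execute_callback instead of reading async_display_callbacks directly, so async callbacks are dispatched through the same machinery as sync ones. A sketch, assuming register_display_callback accepts an is_async flag as in this package's callback documentation; the handler body is illustrative:

    from praisonaiagents import register_display_callback

    async def on_tool_call(message=None, **kwargs):
        # e.g. forward tool-call notifications to an external audit log
        print(f"tool call: {message}")

    register_display_callback('tool_call', on_tool_call, is_async=True)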

@@ -59,10 +59,90 @@ class Process:

  def _create_loop_subtasks(self, loop_task: Task):
  """Create subtasks for a loop task from input file."""
- logging.warning(f"_create_loop_subtasks called for {loop_task.name} but method not fully implemented")
- # TODO: Implement loop subtask creation from input file
- # This should read loop_task.input_file and create subtasks
- pass
+ if not loop_task.input_file:
+ logging.warning(f"_create_loop_subtasks called for {loop_task.name} but no input_file specified")
+ return
+
+ try:
+ file_ext = os.path.splitext(loop_task.input_file)[1].lower()
+ new_tasks = []
+
+ if file_ext == ".csv":
+ with open(loop_task.input_file, "r", encoding="utf-8") as f:
+ reader = csv.reader(f, quotechar='"', escapechar='\\')
+ previous_task = None
+ task_count = 0
+
+ for i, row in enumerate(reader):
+ if not row: # Skip empty rows
+ continue
+
+ # Handle Q&A pairs with potential commas
+ task_desc = row[0].strip() if row else ""
+ if len(row) > 1:
+ question = row[0].strip()
+ answer = ",".join(field.strip() for field in row[1:])
+ task_desc = f"Question: {question}\nAnswer: {answer}"
+
+ if not task_desc: # Skip rows with empty content
+ continue
+
+ task_count += 1
+ logging.debug(f"Creating subtask from CSV row {i+1}: {task_desc}")
+
+ row_task = Task(
+ description=f"{loop_task.description}\n{task_desc}" if loop_task.description else task_desc,
+ agent=loop_task.agent,
+ name=f"{loop_task.name}_{task_count}" if loop_task.name else task_desc,
+ expected_output=getattr(loop_task, 'expected_output', None),
+ callback=loop_task.callback, # Inherit callback from parent loop task
+ is_start=(task_count == 1),
+ task_type="task"
+ )
+ self.tasks[row_task.id] = row_task
+ new_tasks.append(row_task)
+
+ if previous_task:
+ previous_task.next_tasks = [row_task.name]
+ previous_task = row_task
+
+ logging.info(f"Created {task_count} subtasks from CSV file for {loop_task.name}")
+ else:
+ # Handle text files
+ with open(loop_task.input_file, "r", encoding="utf-8") as f:
+ lines = f.read().splitlines()
+ previous_task = None
+ for i, line in enumerate(lines):
+ if not line.strip(): # Skip empty lines
+ continue
+
+ row_task = Task(
+ description=f"{loop_task.description}\n{line.strip()}" if loop_task.description else line.strip(),
+ agent=loop_task.agent,
+ name=f"{loop_task.name}_{i+1}" if loop_task.name else line.strip(),
+ expected_output=getattr(loop_task, 'expected_output', None),
+ callback=loop_task.callback, # Inherit callback from parent loop task
+ is_start=(i == 0),
+ task_type="task"
+ )
+ self.tasks[row_task.id] = row_task
+ new_tasks.append(row_task)
+
+ if previous_task:
+ previous_task.next_tasks = [row_task.name]
+ previous_task = row_task
+
+ logging.info(f"Created {len(new_tasks)} subtasks from text file for {loop_task.name}")
+
+ if new_tasks and loop_task.next_tasks:
+ # Connect last subtask to loop task's next tasks
+ last_task = new_tasks[-1]
+ last_task.next_tasks = loop_task.next_tasks
+
+ except Exception as e:
+ logging.error(f"Failed to create subtasks for loop task {loop_task.name}: {e}")
+ import traceback
+ traceback.print_exc()

  def _build_task_context(self, current_task: Task) -> str:
  """Build context for a task based on its retain_full_context setting"""
@@ -809,6 +889,7 @@ Provide a JSON with the structure:
  agent=start_task.agent,
  name=f"{start_task.name}_{task_count}" if start_task.name else task_desc,
  expected_output=getattr(start_task, 'expected_output', None),
+ callback=start_task.callback, # Inherit callback from parent loop task
  is_start=(task_count == 1),
  task_type="decision", # Change to decision type
  next_tasks=inherited_next_tasks, # Inherit parent's next tasks
@@ -842,6 +923,7 @@ Provide a JSON with the structure:
  agent=start_task.agent,
  name=f"{start_task.name}_{i+1}" if start_task.name else line.strip(),
  expected_output=getattr(start_task, 'expected_output', None),
+ callback=start_task.callback, # Inherit callback from parent loop task
  is_start=(i == 0),
  task_type="task",
  condition={
@@ -925,6 +1007,7 @@ Tasks by type:
  agent=current_task.agent,
  name=f"{current_task.name}_{i+1}" if current_task.name else task_desc,
  expected_output=getattr(current_task, 'expected_output', None),
+ callback=current_task.callback, # Inherit callback from parent loop task
  is_start=(i == 0),
  task_type="task",
  condition={
@@ -949,6 +1032,7 @@ Tasks by type:
  agent=current_task.agent,
  name=f"{current_task.name}_{i+1}" if current_task.name else line.strip(),
  expected_output=getattr(current_task, 'expected_output', None),
+ callback=current_task.callback, # Inherit callback from parent loop task
  is_start=(i == 0),
  task_type="task",
  condition={
@@ -381,13 +381,10 @@ class Task:
  logger.exception(e) # Print full stack trace
  # Continue execution even if memory operations fail

- # Execute original callback
+ # Execute original callback with metadata support
  if self.callback:
  try:
- if asyncio.iscoroutinefunction(self.callback):
- await self.callback(task_output)
- else:
- self.callback(task_output)
+ await self._execute_callback_with_metadata(task_output)
  except Exception as e:
  logger.error(f"Task {self.id}: Failed to execute callback: {e}")
  logger.exception(e)
@@ -470,4 +467,63 @@ Context:
  success=False,
  result=None,
  error=f"Guardrail validation error: {str(e)}"
- )
+ )
+
+ async def _execute_callback_with_metadata(self, task_output):
+ """Execute callback with metadata support while maintaining backward compatibility.
+
+ This method automatically detects the callback signature:
+ - Single parameter callbacks receive only TaskOutput (backward compatible)
+ - Two parameter callbacks receive TaskOutput and metadata dict (enhanced)
+
+ Args:
+ task_output: The TaskOutput object to pass to the callback
+ """
+ if not self.callback:
+ return
+
+ try:
+ # Inspect the callback signature to determine parameter count
+ sig = inspect.signature(self.callback)
+ param_count = len(sig.parameters)
+
+ if param_count == 1:
+ # Backward compatible: single parameter callback
+ if asyncio.iscoroutinefunction(self.callback):
+ await self.callback(task_output)
+ else:
+ self.callback(task_output)
+ elif param_count >= 2:
+ # Enhanced: two parameter callback with metadata
+ metadata = {
+ 'task_id': self.id,
+ 'task_name': self.name,
+ 'agent_name': self.agent.name if self.agent else None,
+ 'task_type': self.task_type,
+ 'task_status': self.status,
+ 'task_description': self.description,
+ 'expected_output': self.expected_output,
+ 'input_file': getattr(self, 'input_file', None),
+ 'loop_state': self.loop_state,
+ 'retry_count': getattr(self, 'retry_count', 0),
+ 'async_execution': self.async_execution
+ }
+
+ if asyncio.iscoroutinefunction(self.callback):
+ await self.callback(task_output, metadata)
+ else:
+ self.callback(task_output, metadata)
+ else:
+ # No parameter callback - unusual but handle gracefully
+ if asyncio.iscoroutinefunction(self.callback):
+ await self.callback()
+ else:
+ self.callback()
+
+ except TypeError as e:
+ # Fallback for signature inspection issues
+ logger.warning(f"Task {self.id}: Callback signature inspection failed, falling back to single parameter: {e}")
+ if asyncio.iscoroutinefunction(self.callback):
+ await self.callback(task_output)
+ else:
+ self.callback(task_output)
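
Task callbacks now go through _execute_callback_with_metadata in praisonaiagents/task/task.py, which inspects the callback's signature: one-parameter callbacks keep receiving only the TaskOutput, while two-parameter callbacks also get the metadata dict built above. A sketch of both forms; the metadata keys mirror those populated in the hunk, and the task fields are illustrative:

    from praisonaiagents import Task

    def legacy_callback(output):
        # Unchanged behaviour: only the TaskOutput
        print(output.raw)

    def callback_with_metadata(output, metadata):
        # New in 0.0.132: task/agent context arrives alongside the output
        print(metadata['task_name'], metadata['agent_name'], metadata['task_status'])
        print(output.raw)

    task = Task(
        description="Summarise the release notes",
        expected_output="A short summary",
        callback=callback_with_metadata,
    )

Async callbacks of either arity are detected with asyncio.iscoroutinefunction and awaited the same way.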
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: praisonaiagents
- Version: 0.0.131
+ Version: 0.0.132
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Python: >=3.10
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "praisonaiagents"
- version = "0.0.131"
+ version = "0.0.132"
  description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
  requires-python = ">=3.10"
  authors = [