praisonaiagents 0.0.138.tar.gz → 0.0.140.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/PKG-INFO +1 -1
  2. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/agent/agent.py +8 -4
  3. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/agents/agents.py +10 -4
  4. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/llm/llm.py +39 -13
  5. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents.egg-info/PKG-INFO +1 -1
  6. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/pyproject.toml +1 -1
  7. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/README.md +0 -0
  8. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/__init__.py +0 -0
  9. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/agent/__init__.py +0 -0
  10. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/agent/handoff.py +0 -0
  11. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/agent/image_agent.py +0 -0
  12. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/agent/router_agent.py +0 -0
  13. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/agents/__init__.py +0 -0
  14. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/agents/autoagents.py +0 -0
  15. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/approval.py +0 -0
  16. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/guardrails/__init__.py +0 -0
  17. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/guardrails/guardrail_result.py +0 -0
  18. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/guardrails/llm_guardrail.py +0 -0
  19. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/knowledge/__init__.py +0 -0
  20. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/knowledge/chunking.py +0 -0
  21. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/knowledge/knowledge.py +0 -0
  22. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/llm/__init__.py +0 -0
  23. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/llm/model_capabilities.py +0 -0
  24. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/llm/model_router.py +0 -0
  25. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/llm/openai_client.py +0 -0
  26. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/main.py +0 -0
  27. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/mcp/__init__.py +0 -0
  28. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/mcp/mcp.py +0 -0
  29. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/mcp/mcp_http_stream.py +0 -0
  30. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/mcp/mcp_sse.py +0 -0
  31. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/memory/__init__.py +0 -0
  32. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/memory/memory.py +0 -0
  33. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/process/__init__.py +0 -0
  34. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/process/process.py +0 -0
  35. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/session.py +0 -0
  36. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/task/__init__.py +0 -0
  37. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/task/task.py +0 -0
  38. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/telemetry/__init__.py +0 -0
  39. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/telemetry/integration.py +0 -0
  40. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/telemetry/telemetry.py +0 -0
  41. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/tools/README.md +0 -0
  42. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/tools/__init__.py +0 -0
  43. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/tools/arxiv_tools.py +0 -0
  44. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/tools/calculator_tools.py +0 -0
  45. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/tools/csv_tools.py +0 -0
  46. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/tools/duckdb_tools.py +0 -0
  47. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
  48. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/tools/excel_tools.py +0 -0
  49. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/tools/file_tools.py +0 -0
  50. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/tools/json_tools.py +0 -0
  51. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/tools/newspaper_tools.py +0 -0
  52. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/tools/pandas_tools.py +0 -0
  53. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/tools/python_tools.py +0 -0
  54. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/tools/searxng_tools.py +0 -0
  55. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/tools/shell_tools.py +0 -0
  56. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/tools/spider_tools.py +0 -0
  57. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/tools/test.py +0 -0
  58. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/tools/tools.py +0 -0
  59. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/tools/train/data/generatecot.py +0 -0
  60. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/tools/wikipedia_tools.py +0 -0
  61. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/tools/xml_tools.py +0 -0
  62. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/tools/yaml_tools.py +0 -0
  63. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents/tools/yfinance_tools.py +0 -0
  64. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents.egg-info/SOURCES.txt +0 -0
  65. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents.egg-info/dependency_links.txt +0 -0
  66. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents.egg-info/requires.txt +0 -0
  67. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/praisonaiagents.egg-info/top_level.txt +0 -0
  68. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/setup.cfg +0 -0
  69. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/tests/test-graph-memory.py +0 -0
  70. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/tests/test.py +0 -0
  71. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/tests/test_fix_comprehensive.py +0 -0
  72. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/tests/test_handoff_compatibility.py +0 -0
  73. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/tests/test_http_stream_basic.py +0 -0
  74. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/tests/test_llm_self_reflection_direct.py +0 -0
  75. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/tests/test_ollama_async_fix.py +0 -0
  76. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/tests/test_ollama_fix.py +0 -0
  77. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/tests/test_ollama_sequential_fix.py +0 -0
  78. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/tests/test_posthog_fixed.py +0 -0
  79. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/tests/test_self_reflection_comprehensive.py +0 -0
  80. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/tests/test_self_reflection_fix_simple.py +0 -0
  81. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/tests/test_self_reflection_fix_verification.py +0 -0
  82. {praisonaiagents-0.0.138 → praisonaiagents-0.0.140}/tests/test_validation_feedback.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.138
+Version: 0.0.140
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Python: >=3.10
@@ -1536,7 +1536,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             cleaned = cleaned[:-3].strip()
         return cleaned

-    async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False):
+    async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, task_name=None, task_description=None, task_id=None):
         """Async version of chat method with self-reflection support."""
         # Log all parameter values when in debug mode
         if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
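
The three new keyword arguments default to None, so existing call sites keep working unchanged. A minimal sketch of how a caller might pass the new task metadata (the agent setup and all metadata values below are illustrative, not taken from this diff):

import asyncio
from praisonaiagents import Agent

async def main():
    agent = Agent(instructions="You are a helpful assistant")  # illustrative setup
    reply = await agent.achat(
        "Summarize the quarterly report",
        task_name="summarize-report",             # hypothetical values; all three
        task_description="Summarize Q3 results",  # default to None when omitted
        task_id="task-001",
    )
    print(reply)

asyncio.run(main())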
@@ -1944,7 +1944,11 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             prompt = task
         else:
             prompt = str(task)
-        return await self.achat(prompt)
+        # Extract task info if available
+        task_name = getattr(task, 'name', None)
+        task_description = getattr(task, 'description', None)
+        task_id = getattr(task, 'id', None)
+        return await self.achat(prompt, task_name=task_name, task_description=task_description, task_id=task_id)

     async def execute_tool_async(self, function_name: str, arguments: Dict[str, Any]) -> Any:
         """Async version of execute_tool"""
@@ -2113,7 +2117,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         try:
             # Use async version if available, otherwise use sync version
             if asyncio.iscoroutinefunction(self.chat):
-                response = await self.achat(query)
+                response = await self.achat(query, task_name=None, task_description=None, task_id=None)
             else:
                 # Run sync function in a thread to avoid blocking
                 loop = asyncio.get_event_loop()
@@ -2234,7 +2238,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         try:
             # Ensure self.achat is used as it's the async version and pass its tools
             if hasattr(self, 'achat') and asyncio.iscoroutinefunction(self.achat):
-                response = await self.achat(prompt, tools=self.tools)
+                response = await self.achat(prompt, tools=self.tools, task_name=None, task_description=None, task_id=None)
             elif hasattr(self, 'chat'): # Fallback for synchronous chat
                 loop = asyncio.get_event_loop()
                 response = await loop.run_in_executor(None, lambda p=prompt: self.chat(p, tools=self.tools))
@@ -362,14 +362,20 @@ Context:
                     _get_multimodal_message(task_prompt, task.images),
                     tools=tools,
                     output_json=task.output_json,
-                    output_pydantic=task.output_pydantic
+                    output_pydantic=task.output_pydantic,
+                    task_name=task.name,
+                    task_description=task.description,
+                    task_id=task.id
                 )
             else:
                 agent_output = await executor_agent.achat(
                     task_prompt,
                     tools=tools,
                     output_json=task.output_json,
-                    output_pydantic=task.output_pydantic
+                    output_pydantic=task.output_pydantic,
+                    task_name=task.name,
+                    task_description=task.description,
+                    task_id=task.id
                 )

             if agent_output:
@@ -1138,7 +1144,7 @@ Context:
         try:
             # Use async version if available, otherwise use sync version
             if asyncio.iscoroutinefunction(agent_instance.chat):
-                response = await agent_instance.achat(current_input)
+                response = await agent_instance.achat(current_input, task_name=None, task_description=None, task_id=None)
             else:
                 # Run sync function in a thread to avoid blocking
                 loop = asyncio.get_running_loop()
@@ -1294,7 +1300,7 @@ Context:
         try:
             logging.debug(f"Processing with agent: {agent_instance.name}")
             if hasattr(agent_instance, 'achat') and asyncio.iscoroutinefunction(agent_instance.achat):
-                response = await agent_instance.achat(current_input, tools=agent_instance.tools)
+                response = await agent_instance.achat(current_input, tools=agent_instance.tools, task_name=None, task_description=None, task_id=None)
             elif hasattr(agent_instance, 'chat'): # Fallback to sync chat if achat not suitable
                 loop = asyncio.get_running_loop()
                 response = await loop.run_in_executor(None, lambda ci=current_input: agent_instance.chat(ci, tools=agent_instance.tools))
@@ -93,6 +93,9 @@ class LLM:
     # Ollama-specific prompt constants
     OLLAMA_TOOL_USAGE_PROMPT = "Please analyze the request and use the available tools to help answer the question. Start by identifying what information you need."
     OLLAMA_FINAL_ANSWER_PROMPT = "Based on the tool results above, please provide the final answer to the original question."
+
+    # Ollama iteration threshold for summary generation
+    OLLAMA_SUMMARY_ITERATION_THRESHOLD = 3

     def _log_llm_config(self, method_name: str, **config):
         """Centralized debug logging for LLM configuration and parameters.
@@ -323,9 +326,8 @@ class LLM:
         if not (self._is_ollama_provider() and tool_results):
             return None

-        # If response is substantial, no summary needed
-        if response_text and len(response_text.strip()) > OLLAMA_MIN_RESPONSE_LENGTH:
-            return None
+        # For Ollama, always generate summary when we have tool results
+        # This prevents infinite loops caused by empty/minimal responses

         # Build tool summary efficiently
         summary_lines = ["Based on the tool execution results:"]
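
With the length check removed, the helper returns a summary for Ollama whenever tool results exist, no matter how long the model's own text is. A simplified standalone sketch of the new behavior (the per-result line format is an assumption; the diff only shows the opening line):

def generate_ollama_tool_summary(is_ollama_provider, tool_results):
    # Only the Ollama provider with at least one tool result gets a summary.
    if not (is_ollama_provider and tool_results):
        return None
    # The summary is now built unconditionally, so an empty or one-word
    # response can no longer leave the agent loop without a final answer.
    summary_lines = ["Based on the tool execution results:"]
    summary_lines += [f"- Tool {i + 1}: {r}" for i, r in enumerate(tool_results)]
    return "\n".join(summary_lines)

print(generate_ollama_tool_summary(True, [{"temperature_c": 21}]))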
@@ -827,6 +829,7 @@ class LLM:
         iteration_count = 0
         final_response_text = ""
         stored_reasoning_content = None  # Store reasoning content from tool execution
+        accumulated_tool_results = []  # Store all tool results across iterations

         while iteration_count < max_iterations:
             try:
@@ -1070,7 +1073,7 @@ class LLM:
                         })

                     should_continue = False
-                    tool_results = []  # Store all tool results
+                    tool_results = []  # Store current iteration tool results
                     for tool_call in tool_calls:
                         # Handle both object and dict access patterns
                         is_ollama = self._is_ollama_provider()
@@ -1084,6 +1087,7 @@ class LLM:
                         tool_result = execute_tool_fn(function_name, arguments)
                         logging.debug(f"[TOOL_EXEC_DEBUG] Tool execution result: {tool_result}")
                         tool_results.append(tool_result)  # Store the result
+                        accumulated_tool_results.append(tool_result)  # Accumulate across iterations

                         if verbose:
                             display_message = f"Agent {agent_name} called function '{function_name}' with arguments: {arguments}\n"
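
The difference between the per-iteration tool_results and the new accumulated_tool_results is what makes the later summary complete; a toy demonstration:

accumulated_tool_results = []          # survives the whole while-loop

for iteration in range(3):
    tool_results = []                  # reset each iteration, as in the diff
    result = f"result-{iteration}"     # stand-in for execute_tool_fn(...)
    tool_results.append(result)
    accumulated_tool_results.append(result)

print(tool_results)              # ['result-2'] - last iteration only
print(accumulated_tool_results)  # ['result-0', 'result-1', 'result-2']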
@@ -1121,15 +1125,25 @@ class LLM:

                     # Check if the LLM provided a final answer alongside the tool calls
                     # If response_text contains substantive content, treat it as the final answer
-                    if response_text and response_text.strip() and len(response_text.strip()) > 10:
+                    if response_text and len(response_text.strip()) > 10:
                         # LLM provided a final answer after tool execution, don't continue
                         final_response_text = response_text.strip()
                         break

                     # Special handling for Ollama to prevent infinite loops
-                    tool_summary = self._generate_ollama_tool_summary(tool_results, response_text)
-                    if tool_summary:
-                        final_response_text = tool_summary
+                    # Only generate summary after multiple iterations to allow sequential execution
+                    if iteration_count >= self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
+                        tool_summary = self._generate_ollama_tool_summary(accumulated_tool_results, response_text)
+                        if tool_summary:
+                            final_response_text = tool_summary
+                            break
+
+                    # Safety check: prevent infinite loops for any provider
+                    if iteration_count >= 5:
+                        if tool_results:
+                            final_response_text = "Task completed successfully based on tool execution results."
+                        else:
+                            final_response_text = response_text.strip() if response_text else "Task completed."
                         break

                     # Otherwise, continue the loop to check if more tools are needed
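
Taken together, the loop now has three exits: substantive model text, the Ollama summary after the iteration threshold, and a hard cap for any provider. A simplified sketch of that decision order (the function name and placeholder summary string are illustrative, not the package's API):

OLLAMA_SUMMARY_ITERATION_THRESHOLD = 3

def pick_exit(iteration_count, is_ollama, tool_results, accumulated_results, response_text):
    """Return final text if the loop should stop, else None (keep iterating)."""
    # Exit 1: a substantive answer alongside the tool calls ends the loop.
    if response_text and len(response_text.strip()) > 10:
        return response_text.strip()
    # Exit 2: after the threshold, Ollama gets a summary of the accumulated
    # results; waiting lets sequential multi-tool plans finish first.
    if is_ollama and accumulated_results and iteration_count >= OLLAMA_SUMMARY_ITERATION_THRESHOLD:
        return "Based on the tool execution results: ..."  # placeholder summary
    # Exit 3: hard cap for any provider, mirroring the iteration_count >= 5 check.
    if iteration_count >= 5:
        if tool_results:
            return "Task completed successfully based on tool execution results."
        return response_text.strip() if response_text else "Task completed."
    return None

print(pick_exit(3, True, ["r"], ["r"], ""))  # summary fires before the hard cap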
@@ -1579,6 +1593,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         iteration_count = 0
         final_response_text = ""
         stored_reasoning_content = None  # Store reasoning content from tool execution
+        accumulated_tool_results = []  # Store all tool results across iterations

         while iteration_count < max_iterations:
             response_text = ""
@@ -1749,7 +1764,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                             "tool_calls": serializable_tool_calls
                         })

-                    tool_results = []  # Store all tool results
+                    tool_results = []  # Store current iteration tool results
                     for tool_call in tool_calls:
                         # Handle both object and dict access patterns
                         is_ollama = self._is_ollama_provider()
@@ -1761,6 +1776,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

                         tool_result = await execute_tool_fn(function_name, arguments)
                         tool_results.append(tool_result)  # Store the result
+                        accumulated_tool_results.append(tool_result)  # Accumulate across iterations

                         if verbose:
                             display_message = f"Agent {agent_name} called function '{function_name}' with arguments: {arguments}\n"
@@ -1888,15 +1904,25 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

                     # Check if the LLM provided a final answer alongside the tool calls
                     # If response_text contains substantive content, treat it as the final answer
-                    if response_text and response_text.strip() and len(response_text.strip()) > 10:
+                    if response_text and len(response_text.strip()) > 10:
                         # LLM provided a final answer after tool execution, don't continue
                         final_response_text = response_text.strip()
                         break

                     # Special handling for Ollama to prevent infinite loops
-                    tool_summary = self._generate_ollama_tool_summary(tool_results, response_text)
-                    if tool_summary:
-                        final_response_text = tool_summary
+                    # Only generate summary after multiple iterations to allow sequential execution
+                    if iteration_count >= self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
+                        tool_summary = self._generate_ollama_tool_summary(accumulated_tool_results, response_text)
+                        if tool_summary:
+                            final_response_text = tool_summary
+                            break
+
+                    # Safety check: prevent infinite loops for any provider
+                    if iteration_count >= 5:
+                        if tool_results:
+                            final_response_text = "Task completed successfully based on tool execution results."
+                        else:
+                            final_response_text = response_text.strip() if response_text else "Task completed."
                         break

                     # Continue the loop to check if more tools are needed
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.138
+Version: 0.0.140
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Python: >=3.10
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

 [project]
 name = "praisonaiagents"
-version = "0.0.138"
+version = "0.0.140"
 description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
 requires-python = ">=3.10"
 authors = [