praisonaiagents 0.0.140__tar.gz → 0.0.141__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/PKG-INFO +1 -1
  2. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/agent/agent.py +38 -35
  3. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/llm/llm.py +69 -26
  4. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents.egg-info/PKG-INFO +1 -1
  5. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/pyproject.toml +1 -1
  6. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/README.md +0 -0
  7. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/__init__.py +0 -0
  8. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/agent/__init__.py +0 -0
  9. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/agent/handoff.py +0 -0
  10. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/agent/image_agent.py +0 -0
  11. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/agent/router_agent.py +0 -0
  12. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/agents/__init__.py +0 -0
  13. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/agents/agents.py +0 -0
  14. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/agents/autoagents.py +0 -0
  15. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/approval.py +0 -0
  16. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/guardrails/__init__.py +0 -0
  17. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/guardrails/guardrail_result.py +0 -0
  18. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/guardrails/llm_guardrail.py +0 -0
  19. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/knowledge/__init__.py +0 -0
  20. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/knowledge/chunking.py +0 -0
  21. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/knowledge/knowledge.py +0 -0
  22. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/llm/__init__.py +0 -0
  23. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/llm/model_capabilities.py +0 -0
  24. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/llm/model_router.py +0 -0
  25. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/llm/openai_client.py +0 -0
  26. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/main.py +0 -0
  27. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/mcp/__init__.py +0 -0
  28. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/mcp/mcp.py +0 -0
  29. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/mcp/mcp_http_stream.py +0 -0
  30. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/mcp/mcp_sse.py +0 -0
  31. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/memory/__init__.py +0 -0
  32. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/memory/memory.py +0 -0
  33. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/process/__init__.py +0 -0
  34. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/process/process.py +0 -0
  35. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/session.py +0 -0
  36. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/task/__init__.py +0 -0
  37. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/task/task.py +0 -0
  38. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/telemetry/__init__.py +0 -0
  39. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/telemetry/integration.py +0 -0
  40. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/telemetry/telemetry.py +0 -0
  41. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/tools/README.md +0 -0
  42. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/tools/__init__.py +0 -0
  43. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/tools/arxiv_tools.py +0 -0
  44. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/tools/calculator_tools.py +0 -0
  45. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/tools/csv_tools.py +0 -0
  46. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/tools/duckdb_tools.py +0 -0
  47. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
  48. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/tools/excel_tools.py +0 -0
  49. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/tools/file_tools.py +0 -0
  50. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/tools/json_tools.py +0 -0
  51. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/tools/newspaper_tools.py +0 -0
  52. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/tools/pandas_tools.py +0 -0
  53. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/tools/python_tools.py +0 -0
  54. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/tools/searxng_tools.py +0 -0
  55. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/tools/shell_tools.py +0 -0
  56. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/tools/spider_tools.py +0 -0
  57. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/tools/test.py +0 -0
  58. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/tools/tools.py +0 -0
  59. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/tools/train/data/generatecot.py +0 -0
  60. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/tools/wikipedia_tools.py +0 -0
  61. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/tools/xml_tools.py +0 -0
  62. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/tools/yaml_tools.py +0 -0
  63. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents/tools/yfinance_tools.py +0 -0
  64. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents.egg-info/SOURCES.txt +0 -0
  65. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents.egg-info/dependency_links.txt +0 -0
  66. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents.egg-info/requires.txt +0 -0
  67. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/praisonaiagents.egg-info/top_level.txt +0 -0
  68. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/setup.cfg +0 -0
  69. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/tests/test-graph-memory.py +0 -0
  70. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/tests/test.py +0 -0
  71. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/tests/test_fix_comprehensive.py +0 -0
  72. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/tests/test_handoff_compatibility.py +0 -0
  73. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/tests/test_http_stream_basic.py +0 -0
  74. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/tests/test_llm_self_reflection_direct.py +0 -0
  75. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/tests/test_ollama_async_fix.py +0 -0
  76. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/tests/test_ollama_fix.py +0 -0
  77. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/tests/test_ollama_sequential_fix.py +0 -0
  78. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/tests/test_posthog_fixed.py +0 -0
  79. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/tests/test_self_reflection_comprehensive.py +0 -0
  80. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/tests/test_self_reflection_fix_simple.py +0 -0
  81. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/tests/test_self_reflection_fix_verification.py +0 -0
  82. {praisonaiagents-0.0.140 → praisonaiagents-0.0.141}/tests/test_validation_feedback.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.140
+Version: 0.0.141
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Python: >=3.10
praisonaiagents/agent/agent.py
@@ -653,7 +653,7 @@ Your Goal: {self.goal}
                 error=f"Agent guardrail validation error: {str(e)}"
             )

-    def _apply_guardrail_with_retry(self, response_text, prompt, temperature=0.2, tools=None):
+    def _apply_guardrail_with_retry(self, response_text, prompt, temperature=0.2, tools=None, task_name=None, task_description=None, task_id=None):
         """Apply guardrail validation with retry logic.

         Args:
@@ -707,7 +707,7 @@ Your Goal: {self.goal}
                 # Regenerate response for retry
                 try:
                     retry_prompt = f"{prompt}\n\nNote: Previous response failed validation due to: {guardrail_result.error}. Please provide an improved response."
-                    response = self._chat_completion([{"role": "user", "content": retry_prompt}], temperature, tools)
+                    response = self._chat_completion([{"role": "user", "content": retry_prompt}], temperature, tools, task_name=task_name, task_description=task_description, task_id=task_id)
                     if response and response.choices:
                         current_response = response.choices[0].message.content.strip()
                     else:
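
The retry path above now forwards the task metadata into the regeneration call. A minimal sketch of that retry shape, assuming hypothetical validate/regenerate helpers in place of the library's internals:

    # Hedged sketch of the guardrail retry loop; `validate` and `regenerate`
    # are hypothetical stand-ins for the real guardrail check and LLM call.
    def apply_guardrail_with_retry(validate, regenerate, response_text, prompt,
                                   max_retries=3, task_name=None,
                                   task_description=None, task_id=None):
        current = response_text
        for attempt in range(max_retries + 1):
            result = validate(current)  # assumed to return an object with .success and .error
            if result.success:
                return current
            if attempt == max_retries:
                raise ValueError(f"Guardrail failed after {max_retries} retries: {result.error}")
            retry_prompt = (f"{prompt}\n\nNote: Previous response failed validation due to: "
                            f"{result.error}. Please provide an improved response.")
            # As of 0.0.141, the task metadata rides along with the retry call
            current = regenerate(retry_prompt, task_name=task_name,
                                 task_description=task_description, task_id=task_id)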
@@ -1072,7 +1072,7 @@ Your Goal: {self.goal}"""
             reasoning_steps=reasoning_steps
         )

-    def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, reasoning_steps=False):
+    def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, reasoning_steps=False, task_name=None, task_description=None, task_id=None):
         start_time = time.time()
         logging.debug(f"{self.name} sending messages to LLM: {messages}")

@@ -1128,11 +1128,6 @@ Your Goal: {self.goal}"""
                 )
             else:
                 # Use the standard OpenAI client approach with tool support
-                def custom_display_fn(text, start_time):
-                    if self.verbose:
-                        return display_generating(text, start_time)
-                    return ""
-
                 # Note: openai_client expects tools in various formats and will format them internally
                 # But since we already have formatted_tools, we can pass them directly
                 if self._openai_client is None:
@@ -1145,8 +1140,8 @@ Your Goal: {self.goal}"""
                     tools=formatted_tools, # Already formatted for OpenAI
                     execute_tool_fn=self.execute_tool,
                     stream=stream,
-                    console=self.console if self.verbose else None,
-                    display_fn=display_generating if stream and self.verbose else None,
+                    console=self.console if (self.verbose or stream) else None,
+                    display_fn=display_generating if stream else None,
                     reasoning_steps=reasoning_steps,
                     verbose=self.verbose,
                     max_iterations=10
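
The net effect of this hunk: the live token display is keyed to streaming rather than verbosity. A small illustrative comparison of the old versus new selection (names mirror the call above; this is not the client code):

    # Hedged comparison of the display wiring before and after this change.
    def select_display(console, display_generating, verbose: bool, stream: bool):
        # 0.0.140: console only when verbose; display only when verbose AND streaming
        old = (console if verbose else None,
               display_generating if stream and verbose else None)
        # 0.0.141: console when verbose OR streaming; display whenever streaming
        new = (console if (verbose or stream) else None,
               display_generating if stream else None)
        return old, new

So a non-verbose agent that streams now still gets the generating display, where previously it received neither a console nor a display function.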
@@ -1158,7 +1153,7 @@ Your Goal: {self.goal}"""
             display_error(f"Error in chat completion: {e}")
             return None

-    def _execute_callback_and_display(self, prompt: str, response: str, generation_time: float):
+    def _execute_callback_and_display(self, prompt: str, response: str, generation_time: float, task_name=None, task_description=None, task_id=None):
         """Helper method to execute callbacks and display interaction.

         This centralizes the logic for callback execution and display to avoid duplication.
@@ -1174,9 +1169,9 @@ Your Goal: {self.goal}"""
             agent_name=self.name,
             agent_role=self.role,
             agent_tools=[t.__name__ for t in self.tools] if self.tools else None,
-            task_name=None, # Not available in this context
-            task_description=None, # Not available in this context
-            task_id=None # Not available in this context
+            task_name=task_name,
+            task_description=task_description,
+            task_id=task_id
         )
         # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
         if self.verbose and not self._using_custom_llm:
@@ -1185,9 +1180,9 @@ Your Goal: {self.goal}"""
                 agent_name=self.name,
                 agent_role=self.role,
                 agent_tools=[t.__name__ for t in self.tools] if self.tools else None,
-                task_name=None, # Not available in this context
-                task_description=None, # Not available in this context
-                task_id=None) # Not available in this context
+                task_name=task_name,
+                task_description=task_description,
+                task_id=task_id)

     def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, stream=True, task_name=None, task_description=None, task_id=None):
         # Log all parameter values when in debug mode
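
Because the three task fields are now forwarded instead of hard-coded to None, display callbacks can attribute output to a task. A hedged usage sketch, assuming praisonaiagents' register_display_callback API and the keyword names visible in this hunk:

    from praisonaiagents import register_display_callback

    # Illustrative callback; before 0.0.141 the task_* kwargs arrived as None
    # from this code path.
    def log_interaction(message=None, response=None, agent_name=None,
                        task_name=None, task_description=None, task_id=None,
                        **kwargs):
        print(f"[{agent_name}] task={task_name!r} id={task_id!r}: {str(response)[:80]}")

    register_display_callback('interaction', log_interaction)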
@@ -1297,7 +1292,7 @@ Your Goal: {self.goal}"""

                 # Apply guardrail validation for custom LLM response
                 try:
-                    validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
+                    validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools, task_name, task_description, task_id)
                     return validated_response
                 except Exception as e:
                     logging.error(f"Agent {self.name}: Guardrail validation failed for custom LLM: {e}")
@@ -1357,7 +1352,7 @@ Your Goal: {self.goal}"""
                 agent_tools=agent_tools
             )

-            response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, reasoning_steps=reasoning_steps, stream=self.stream)
+            response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, reasoning_steps=reasoning_steps, stream=self.stream, task_name=task_name, task_description=task_description, task_id=task_id)
             if not response:
                 # Rollback chat history on response failure
                 self.chat_history = self.chat_history[:chat_history_length]
@@ -1372,9 +1367,9 @@ Your Goal: {self.goal}"""
                     self.chat_history.append({"role": "assistant", "content": response_text})
                     # Apply guardrail validation even for JSON output
                     try:
-                        validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                        validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
                         # Execute callback after validation
-                        self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
+                        self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
                         return validated_response
                     except Exception as e:
                         logging.error(f"Agent {self.name}: Guardrail validation failed for JSON output: {e}")
@@ -1391,9 +1386,9 @@ Your Goal: {self.goal}"""
                 if reasoning_steps and hasattr(response.choices[0].message, 'reasoning_content'):
                     # Apply guardrail to reasoning content
                     try:
-                        validated_reasoning = self._apply_guardrail_with_retry(response.choices[0].message.reasoning_content, original_prompt, temperature, tools)
+                        validated_reasoning = self._apply_guardrail_with_retry(response.choices[0].message.reasoning_content, original_prompt, temperature, tools, task_name, task_description, task_id)
                         # Execute callback after validation
-                        self._execute_callback_and_display(original_prompt, validated_reasoning, time.time() - start_time)
+                        self._execute_callback_and_display(original_prompt, validated_reasoning, time.time() - start_time, task_name, task_description, task_id)
                         return validated_reasoning
                     except Exception as e:
                         logging.error(f"Agent {self.name}: Guardrail validation failed for reasoning content: {e}")
@@ -1402,9 +1397,9 @@ Your Goal: {self.goal}"""
                         return None
                 # Apply guardrail to regular response
                 try:
-                    validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                    validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
                     # Execute callback after validation
-                    self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
+                    self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
                     return validated_response
                 except Exception as e:
                     logging.error(f"Agent {self.name}: Guardrail validation failed: {e}")
@@ -1426,7 +1421,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 if self._using_custom_llm or self._openai_client is None:
                     # For custom LLMs, we need to handle reflection differently
                     # Use non-streaming to get complete JSON response
-                    reflection_response = self._chat_completion(messages, temperature=temperature, tools=None, stream=False, reasoning_steps=False)
+                    reflection_response = self._chat_completion(messages, temperature=temperature, tools=None, stream=False, reasoning_steps=False, task_name=task_name, task_description=task_description, task_id=task_id)

                     if not reflection_response or not reflection_response.choices:
                         raise Exception("No response from reflection request")
@@ -1470,9 +1465,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     self.chat_history.append({"role": "assistant", "content": response_text})
                     # Apply guardrail validation after satisfactory reflection
                     try:
-                        validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                        validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
                         # Execute callback after validation
-                        self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
+                        self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
                         return validated_response
                     except Exception as e:
                         logging.error(f"Agent {self.name}: Guardrail validation failed after reflection: {e}")
@@ -1488,9 +1483,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     self.chat_history.append({"role": "assistant", "content": response_text})
                     # Apply guardrail validation after max reflections
                     try:
-                        validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                        validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
                         # Execute callback after validation
-                        self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
+                        self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
                         return validated_response
                     except Exception as e:
                         logging.error(f"Agent {self.name}: Guardrail validation failed after max reflections: {e}")
@@ -1503,7 +1498,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
                 # For custom LLMs during reflection, always use non-streaming to ensure complete responses
                 use_stream = self.stream if not self._using_custom_llm else False
-                response = self._chat_completion(messages, temperature=temperature, tools=None, stream=use_stream)
+                response = self._chat_completion(messages, temperature=temperature, tools=None, stream=use_stream, task_name=task_name, task_description=task_description, task_id=task_id)
                 response_text = response.choices[0].message.content.strip()
                 reflection_count += 1
                 continue # Continue the loop for more reflections
@@ -1620,7 +1615,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

                 # Apply guardrail validation for custom LLM response
                 try:
-                    validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
+                    validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools, task_name, task_description, task_id)
+                    # Execute callback after validation
+                    self._execute_callback_and_display(normalized_content, validated_response, time.time() - start_time, task_name, task_description, task_id)
                     return validated_response
                 except Exception as e:
                     logging.error(f"Agent {self.name}: Guardrail validation failed for custom LLM: {e}")
@@ -1697,6 +1694,8 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
                         total_time = time.time() - start_time
                         logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
+                    # Execute callback after tool completion
+                    self._execute_callback_and_display(original_prompt, result, time.time() - start_time, task_name, task_description, task_id)
                     return result
             elif output_json or output_pydantic:
                 response = await self._openai_client.async_client.chat.completions.create(
@@ -1705,11 +1704,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     temperature=temperature,
                     response_format={"type": "json_object"}
                 )
-                # Return the raw response
+                response_text = response.choices[0].message.content
                 if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
                     total_time = time.time() - start_time
                     logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
-                return response.choices[0].message.content
+                # Execute callback after JSON/Pydantic completion
+                self._execute_callback_and_display(original_prompt, response_text, time.time() - start_time, task_name, task_description, task_id)
+                return response_text
             else:
                 response = await self._openai_client.async_client.chat.completions.create(
                     model=self.llm,
@@ -1804,7 +1805,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

             # Apply guardrail validation for OpenAI client response
             try:
-                validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
+                # Execute callback after validation
+                self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
                 return validated_response
             except Exception as e:
                 logging.error(f"Agent {self.name}: Guardrail validation failed for OpenAI client: {e}")
praisonaiagents/llm/llm.py
@@ -477,6 +477,49 @@ class LLM:
             logging.debug(f"[OLLAMA_FIX] Error validating arguments for {function_name}: {e}")
             return arguments

+    def _handle_ollama_sequential_logic(self, iteration_count: int, accumulated_tool_results: List[Any],
+                                        response_text: str, messages: List[Dict]) -> tuple:
+        """
+        Handle Ollama sequential tool execution logic to prevent premature tool summary generation.
+
+        This method implements the two-step process:
+        1. After reaching threshold with tool results, add explicit final answer prompt
+        2. Only generate tool summary if LLM still doesn't respond after explicit prompt
+
+        Args:
+            iteration_count: Current iteration count
+            accumulated_tool_results: List of tool results from all iterations
+            response_text: Current LLM response text
+            messages: Message history list to potentially modify
+
+        Returns:
+            tuple: (should_break, final_response_text, iteration_count)
+                - should_break: Whether to break the iteration loop
+                - final_response_text: Text to use as final response (None if continuing)
+                - iteration_count: Updated iteration count
+        """
+        if not (self._is_ollama_provider() and iteration_count >= self.OLLAMA_SUMMARY_ITERATION_THRESHOLD):
+            return False, None, iteration_count
+
+        # For Ollama: if we have meaningful tool results but empty responses,
+        # give LLM one final chance with explicit prompt for final answer
+        if accumulated_tool_results and iteration_count == self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
+            # Add explicit prompt asking for final answer
+            messages.append({
+                "role": "user",
+                "content": self.OLLAMA_FINAL_ANSWER_PROMPT
+            })
+            # Continue to next iteration to get the final response
+            iteration_count += 1
+            return False, None, iteration_count
+        else:
+            # If still no response after final answer prompt, generate summary
+            tool_summary = self._generate_ollama_tool_summary(accumulated_tool_results, response_text)
+            if tool_summary:
+                return True, tool_summary, iteration_count
+
+            return False, None, iteration_count
+
     def _needs_system_message_skip(self) -> bool:
         """Check if this model requires skipping system messages"""
         if not self.model:
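
A standalone sketch of the two-step flow this method encodes; the threshold value, prompt text and summary format below are stand-ins for the class attributes and helper it actually uses:

    OLLAMA_SUMMARY_ITERATION_THRESHOLD = 3  # stand-in value
    OLLAMA_FINAL_ANSWER_PROMPT = "Please provide your final answer based on the tool results above."  # stand-in text

    def handle_sequential(iteration, tool_results, response_text, messages, is_ollama=True):
        # Mirrors the method's contract: (should_break, final_text, iteration)
        if not (is_ollama and iteration >= OLLAMA_SUMMARY_ITERATION_THRESHOLD):
            return False, None, iteration
        if tool_results and iteration == OLLAMA_SUMMARY_ITERATION_THRESHOLD:
            # Step 1: nudge the model once with an explicit final-answer prompt
            messages.append({"role": "user", "content": OLLAMA_FINAL_ANSWER_PROMPT})
            return False, None, iteration + 1
        # Step 2: still no usable response, so fall back to a tool summary
        summary = f"Completed {len(tool_results)} tool call(s)." if tool_results else None
        return (summary is not None), summary, iteration

    msgs = []
    print(handle_sequential(3, ["result-a"], "", msgs))  # (False, None, 4); nudge appended
    print(handle_sequential(4, ["result-a"], "", msgs))  # (True, 'Completed 1 tool call(s).', 4)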
@@ -1132,11 +1175,15 @@ class LLM:

                         # Special handling for Ollama to prevent infinite loops
                         # Only generate summary after multiple iterations to allow sequential execution
-                        if iteration_count >= self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
-                            tool_summary = self._generate_ollama_tool_summary(accumulated_tool_results, response_text)
-                            if tool_summary:
-                                final_response_text = tool_summary
-                                break
+                        should_break, tool_summary_text, iteration_count = self._handle_ollama_sequential_logic(
+                            iteration_count, accumulated_tool_results, response_text, messages
+                        )
+                        if should_break:
+                            final_response_text = tool_summary_text
+                            break
+                        elif tool_summary_text is None and iteration_count > self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
+                            # Continue iteration after adding final answer prompt
+                            continue

                         # Safety check: prevent infinite loops for any provider
                         if iteration_count >= 5:
@@ -1911,11 +1958,15 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

                         # Special handling for Ollama to prevent infinite loops
                         # Only generate summary after multiple iterations to allow sequential execution
-                        if iteration_count >= self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
-                            tool_summary = self._generate_ollama_tool_summary(accumulated_tool_results, response_text)
-                            if tool_summary:
-                                final_response_text = tool_summary
-                                break
+                        should_break, tool_summary_text, iteration_count = self._handle_ollama_sequential_logic(
+                            iteration_count, accumulated_tool_results, response_text, messages
+                        )
+                        if should_break:
+                            final_response_text = tool_summary_text
+                            break
+                        elif tool_summary_text is None and iteration_count > self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
+                            # Continue iteration after adding final answer prompt
+                            continue

                         # Safety check: prevent infinite loops for any provider
                         if iteration_count >= 5:
@@ -2417,18 +2468,14 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 )

                 if stream:
-                    if verbose:
-                        with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
-                            for chunk in litellm.completion(**completion_params):
-                                content = self._process_streaming_chunk(chunk)
-                                if content:
-                                    response_text += content
-                                    live.update(display_generating(response_text, start_time))
-                    else:
+                    with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
                         for chunk in litellm.completion(**completion_params):
                             content = self._process_streaming_chunk(chunk)
                             if content:
                                 response_text += content
+                                live.update(display_generating(response_text, start_time))
+                            if content:
+                                response_text += content
                 else:
                     response = litellm.completion(**completion_params)
                     response_text = response.choices[0].message.content.strip() if response.choices[0].message.content else ""
@@ -2517,18 +2564,14 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 )

                 if stream:
-                    if verbose:
-                        with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
-                            async for chunk in await litellm.acompletion(**completion_params):
-                                content = self._process_streaming_chunk(chunk)
-                                if content:
-                                    response_text += content
-                                    live.update(display_generating(response_text, start_time))
-                    else:
+                    with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
                         async for chunk in await litellm.acompletion(**completion_params):
                             content = self._process_streaming_chunk(chunk)
                             if content:
                                 response_text += content
+                                live.update(display_generating(response_text, start_time))
+                            if content:
+                                response_text += content
                 else:
                     response = await litellm.acompletion(**completion_params)
                     response_text = response.choices[0].message.content.strip() if response.choices[0].message.content else ""
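
Both streaming hunks collapse the old verbose/non-verbose split into a single rich Live loop. A minimal self-contained sketch of that pattern, with a fake chunk source standing in for litellm:

    import time

    from rich.console import Console
    from rich.live import Live
    from rich.panel import Panel

    def display_generating(text: str, start_time: float) -> Panel:
        # Simplified stand-in for the library's renderer
        return Panel(text or "...", title=f"Generating ({time.time() - start_time:.1f}s)")

    def stream_demo(chunks):
        console = Console()
        response_text, start_time = "", time.time()
        with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
            for content in chunks:  # litellm.completion(..., stream=True) yields chunks in the real code
                if content:
                    response_text += content
                    live.update(display_generating(response_text, start_time))
        return response_text

    print(stream_demo(["Hello", ", ", "world!"]))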
praisonaiagents.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.140
+Version: 0.0.141
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Python: >=3.10
pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

 [project]
 name = "praisonaiagents"
-version = "0.0.140"
+version = "0.0.141"
 description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
 requires-python = ">=3.10"
 authors = [