praisonaiagents 0.0.140__tar.gz → 0.0.142__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/PKG-INFO +1 -1
  2. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/agent/agent.py +47 -34
  3. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/llm/llm.py +114 -34
  4. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/telemetry/__init__.py +2 -2
  5. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/telemetry/telemetry.py +2 -2
  6. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents.egg-info/PKG-INFO +1 -1
  7. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/pyproject.toml +1 -1
  8. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/README.md +0 -0
  9. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/__init__.py +0 -0
  10. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/agent/__init__.py +0 -0
  11. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/agent/handoff.py +0 -0
  12. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/agent/image_agent.py +0 -0
  13. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/agent/router_agent.py +0 -0
  14. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/agents/__init__.py +0 -0
  15. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/agents/agents.py +0 -0
  16. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/agents/autoagents.py +0 -0
  17. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/approval.py +0 -0
  18. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/guardrails/__init__.py +0 -0
  19. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/guardrails/guardrail_result.py +0 -0
  20. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/guardrails/llm_guardrail.py +0 -0
  21. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/knowledge/__init__.py +0 -0
  22. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/knowledge/chunking.py +0 -0
  23. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/knowledge/knowledge.py +0 -0
  24. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/llm/__init__.py +0 -0
  25. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/llm/model_capabilities.py +0 -0
  26. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/llm/model_router.py +0 -0
  27. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/llm/openai_client.py +0 -0
  28. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/main.py +0 -0
  29. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/mcp/__init__.py +0 -0
  30. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/mcp/mcp.py +0 -0
  31. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/mcp/mcp_http_stream.py +0 -0
  32. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/mcp/mcp_sse.py +0 -0
  33. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/memory/__init__.py +0 -0
  34. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/memory/memory.py +0 -0
  35. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/process/__init__.py +0 -0
  36. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/process/process.py +0 -0
  37. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/session.py +0 -0
  38. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/task/__init__.py +0 -0
  39. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/task/task.py +0 -0
  40. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/telemetry/integration.py +0 -0
  41. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/tools/README.md +0 -0
  42. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/tools/__init__.py +0 -0
  43. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/tools/arxiv_tools.py +0 -0
  44. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/tools/calculator_tools.py +0 -0
  45. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/tools/csv_tools.py +0 -0
  46. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/tools/duckdb_tools.py +0 -0
  47. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
  48. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/tools/excel_tools.py +0 -0
  49. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/tools/file_tools.py +0 -0
  50. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/tools/json_tools.py +0 -0
  51. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/tools/newspaper_tools.py +0 -0
  52. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/tools/pandas_tools.py +0 -0
  53. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/tools/python_tools.py +0 -0
  54. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/tools/searxng_tools.py +0 -0
  55. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/tools/shell_tools.py +0 -0
  56. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/tools/spider_tools.py +0 -0
  57. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/tools/test.py +0 -0
  58. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/tools/tools.py +0 -0
  59. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/tools/train/data/generatecot.py +0 -0
  60. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/tools/wikipedia_tools.py +0 -0
  61. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/tools/xml_tools.py +0 -0
  62. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/tools/yaml_tools.py +0 -0
  63. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents/tools/yfinance_tools.py +0 -0
  64. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents.egg-info/SOURCES.txt +0 -0
  65. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents.egg-info/dependency_links.txt +0 -0
  66. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents.egg-info/requires.txt +0 -0
  67. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/praisonaiagents.egg-info/top_level.txt +0 -0
  68. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/setup.cfg +0 -0
  69. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/tests/test-graph-memory.py +0 -0
  70. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/tests/test.py +0 -0
  71. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/tests/test_fix_comprehensive.py +0 -0
  72. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/tests/test_handoff_compatibility.py +0 -0
  73. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/tests/test_http_stream_basic.py +0 -0
  74. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/tests/test_llm_self_reflection_direct.py +0 -0
  75. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/tests/test_ollama_async_fix.py +0 -0
  76. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/tests/test_ollama_fix.py +0 -0
  77. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/tests/test_ollama_sequential_fix.py +0 -0
  78. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/tests/test_posthog_fixed.py +0 -0
  79. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/tests/test_self_reflection_comprehensive.py +0 -0
  80. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/tests/test_self_reflection_fix_simple.py +0 -0
  81. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/tests/test_self_reflection_fix_verification.py +0 -0
  82. {praisonaiagents-0.0.140 → praisonaiagents-0.0.142}/tests/test_validation_feedback.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: praisonaiagents
- Version: 0.0.140
+ Version: 0.0.142
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Python: >=3.10
praisonaiagents/agent/agent.py
@@ -354,6 +354,8 @@ class Agent:
  self.instructions = instructions
  # Check for model name in environment variable if not provided
  self._using_custom_llm = False
+ # Flag to track if final result has been displayed to prevent duplicates
+ self._final_display_shown = False

  # Store OpenAI client parameters for lazy initialization
  self._openai_api_key = api_key
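Note: the new _final_display_shown flag prevents the same final interaction from being rendered twice when both the chat path and the callback helper try to display it. A minimal sketch of the pattern, with simplified names and print() standing in for the package's display helper (not the actual implementation):

class AgentDisplayGuard:
    """Illustrates the duplicate-display guard added to Agent (simplified)."""

    def __init__(self, verbose: bool = True):
        self.verbose = verbose
        self._final_display_shown = False  # set once the final result has been rendered

    def chat(self, prompt: str) -> str:
        self._final_display_shown = False      # reset at the start of each conversation turn
        response = f"echo: {prompt}"           # stand-in for the real LLM call
        self._display_final(prompt, response)  # first caller renders the interaction
        self._display_final(prompt, response)  # any later caller is a no-op
        return response

    def _display_final(self, prompt: str, response: str) -> None:
        if self.verbose and not self._final_display_shown:
            print(f"[final] {prompt} -> {response}")
            self._final_display_shown = True


AgentDisplayGuard().chat("hello")  # prints the final interaction exactly once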
@@ -653,7 +655,7 @@ Your Goal: {self.goal}
  error=f"Agent guardrail validation error: {str(e)}"
  )

- def _apply_guardrail_with_retry(self, response_text, prompt, temperature=0.2, tools=None):
+ def _apply_guardrail_with_retry(self, response_text, prompt, temperature=0.2, tools=None, task_name=None, task_description=None, task_id=None):
  """Apply guardrail validation with retry logic.

  Args:
@@ -707,7 +709,7 @@ Your Goal: {self.goal}
  # Regenerate response for retry
  try:
  retry_prompt = f"{prompt}\n\nNote: Previous response failed validation due to: {guardrail_result.error}. Please provide an improved response."
- response = self._chat_completion([{"role": "user", "content": retry_prompt}], temperature, tools)
+ response = self._chat_completion([{"role": "user", "content": retry_prompt}], temperature, tools, task_name=task_name, task_description=task_description, task_id=task_id)
  if response and response.choices:
  current_response = response.choices[0].message.content.strip()
  else:
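Note: _apply_guardrail_with_retry now threads task metadata into the regeneration call so retried completions stay attributable to the originating task. A hedged sketch of the retry shape, using hypothetical validate/regenerate callables in place of the real guardrail and _chat_completion:

from typing import Callable, Optional, Tuple

def guardrail_retry(
    response_text: str,
    prompt: str,
    validate: Callable[[str], Tuple[bool, Optional[str]]],  # hypothetical guardrail check
    regenerate: Callable[..., str],                          # stand-in for _chat_completion
    max_retries: int = 3,
    task_name: Optional[str] = None,
    task_description: Optional[str] = None,
    task_id: Optional[str] = None,
) -> str:
    current = response_text
    for _ in range(max_retries):
        ok, error = validate(current)
        if ok:
            return current
        retry_prompt = (
            f"{prompt}\n\nNote: Previous response failed validation due to: {error}. "
            "Please provide an improved response."
        )
        # Task metadata is forwarded so downstream callbacks can attribute the retry.
        current = regenerate(
            retry_prompt,
            task_name=task_name,
            task_description=task_description,
            task_id=task_id,
        )
    return current

# Example: accept anything longer than 3 characters, regenerating as needed.
validate = lambda text: (len(text) > 3, None if len(text) > 3 else "too short")
regenerate = lambda p, **meta: "a much longer answer"
print(guardrail_retry("hi", "Say hi", validate, regenerate, task_name="greeting", task_id="t-1"))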
@@ -1072,7 +1074,7 @@ Your Goal: {self.goal}"""
  reasoning_steps=reasoning_steps
  )

- def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, reasoning_steps=False):
+ def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, reasoning_steps=False, task_name=None, task_description=None, task_id=None):
  start_time = time.time()
  logging.debug(f"{self.name} sending messages to LLM: {messages}")

@@ -1128,11 +1130,6 @@ Your Goal: {self.goal}"""
  )
  else:
  # Use the standard OpenAI client approach with tool support
- def custom_display_fn(text, start_time):
- if self.verbose:
- return display_generating(text, start_time)
- return ""
-
  # Note: openai_client expects tools in various formats and will format them internally
  # But since we already have formatted_tools, we can pass them directly
  if self._openai_client is None:
@@ -1145,8 +1142,8 @@ Your Goal: {self.goal}"""
  tools=formatted_tools, # Already formatted for OpenAI
  execute_tool_fn=self.execute_tool,
  stream=stream,
- console=self.console if self.verbose else None,
- display_fn=display_generating if stream and self.verbose else None,
+ console=self.console if (self.verbose or stream) else None,
+ display_fn=display_generating if stream else None,
  reasoning_steps=reasoning_steps,
  verbose=self.verbose,
  max_iterations=10
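Note: the display gating changes from requiring both verbose and streaming to streaming alone, so quiet streaming runs still get a console and a live display callback. A small sketch mirroring just the two changed expressions:

def resolve_display_options(verbose: bool, stream: bool, console, display_generating):
    """Mirrors the new gating: a console is supplied when output is verbose OR streamed,
    and the live display callback is attached whenever streaming is enabled."""
    return {
        "console": console if (verbose or stream) else None,
        "display_fn": display_generating if stream else None,
    }

# Before 0.0.142 a quiet (verbose=False) streaming call got neither; now it still streams visibly.
opts = resolve_display_options(verbose=False, stream=True, console="console", display_generating=print)
assert opts["console"] == "console" and opts["display_fn"] is print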
@@ -1158,7 +1155,7 @@ Your Goal: {self.goal}"""
  display_error(f"Error in chat completion: {e}")
  return None

- def _execute_callback_and_display(self, prompt: str, response: str, generation_time: float):
+ def _execute_callback_and_display(self, prompt: str, response: str, generation_time: float, task_name=None, task_description=None, task_id=None):
  """Helper method to execute callbacks and display interaction.

  This centralizes the logic for callback execution and display to avoid duplication.
@@ -1174,12 +1171,13 @@ Your Goal: {self.goal}"""
  agent_name=self.name,
  agent_role=self.role,
  agent_tools=[t.__name__ for t in self.tools] if self.tools else None,
- task_name=None, # Not available in this context
- task_description=None, # Not available in this context
- task_id=None # Not available in this context
+ task_name=task_name,
+ task_description=task_description,
+ task_id=task_id
  )
- # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
- if self.verbose and not self._using_custom_llm:
+ # Always display final interaction when verbose is True to ensure consistent formatting
+ # This ensures both OpenAI and custom LLM providers (like Gemini) show formatted output
+ if self.verbose and not self._final_display_shown:
  display_interaction(prompt, response, markdown=self.markdown,
  generation_time=generation_time, console=self.console,
  agent_name=self.name,
@@ -1188,8 +1186,12 @@ Your Goal: {self.goal}"""
  task_name=None, # Not available in this context
  task_description=None, # Not available in this context
  task_id=None) # Not available in this context
+ self._final_display_shown = True

  def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, stream=True, task_name=None, task_description=None, task_id=None):
+ # Reset the final display flag for each new conversation
+ self._final_display_shown = False
+
  # Log all parameter values when in debug mode
  if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
  param_info = {
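Note: _execute_callback_and_display previously hard-coded task_name/task_description/task_id to None; they are now forwarded from the caller. A standalone sketch of that forwarding, with a hypothetical callback dispatcher:

import time
from typing import Optional

def execute_callbacks(prompt: str, response: str, **meta) -> None:
    # Hypothetical stand-in for the package's interaction-callback dispatcher.
    print(f"callback: task_name={meta.get('task_name')} task_id={meta.get('task_id')}")

def execute_callback_and_display(prompt: str, response: str, generation_time: float,
                                 task_name: Optional[str] = None,
                                 task_description: Optional[str] = None,
                                 task_id: Optional[str] = None) -> None:
    # Task metadata is forwarded instead of being hard-coded to None,
    # so downstream logging/display can attribute the interaction to a task.
    execute_callbacks(prompt, response, generation_time=generation_time,
                      task_name=task_name, task_description=task_description, task_id=task_id)

start = time.time()
execute_callback_and_display("What is 2+2?", "4", time.time() - start,
                             task_name="math", task_id="t-1")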
@@ -1297,7 +1299,7 @@ Your Goal: {self.goal}"""

  # Apply guardrail validation for custom LLM response
  try:
- validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
+ validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools, task_name, task_description, task_id)
  return validated_response
  except Exception as e:
  logging.error(f"Agent {self.name}: Guardrail validation failed for custom LLM: {e}")
@@ -1357,7 +1359,7 @@ Your Goal: {self.goal}"""
  agent_tools=agent_tools
  )

- response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, reasoning_steps=reasoning_steps, stream=self.stream)
+ response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, reasoning_steps=reasoning_steps, stream=self.stream, task_name=task_name, task_description=task_description, task_id=task_id)
  if not response:
  # Rollback chat history on response failure
  self.chat_history = self.chat_history[:chat_history_length]
@@ -1372,9 +1374,9 @@ Your Goal: {self.goal}"""
  self.chat_history.append({"role": "assistant", "content": response_text})
  # Apply guardrail validation even for JSON output
  try:
- validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+ validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
  # Execute callback after validation
- self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
+ self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
  return validated_response
  except Exception as e:
  logging.error(f"Agent {self.name}: Guardrail validation failed for JSON output: {e}")
@@ -1391,9 +1393,9 @@ Your Goal: {self.goal}"""
  if reasoning_steps and hasattr(response.choices[0].message, 'reasoning_content'):
  # Apply guardrail to reasoning content
  try:
- validated_reasoning = self._apply_guardrail_with_retry(response.choices[0].message.reasoning_content, original_prompt, temperature, tools)
+ validated_reasoning = self._apply_guardrail_with_retry(response.choices[0].message.reasoning_content, original_prompt, temperature, tools, task_name, task_description, task_id)
  # Execute callback after validation
- self._execute_callback_and_display(original_prompt, validated_reasoning, time.time() - start_time)
+ self._execute_callback_and_display(original_prompt, validated_reasoning, time.time() - start_time, task_name, task_description, task_id)
  return validated_reasoning
  except Exception as e:
  logging.error(f"Agent {self.name}: Guardrail validation failed for reasoning content: {e}")
@@ -1402,9 +1404,9 @@ Your Goal: {self.goal}"""
  return None
  # Apply guardrail to regular response
  try:
- validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+ validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
  # Execute callback after validation
- self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
+ self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
  return validated_response
  except Exception as e:
  logging.error(f"Agent {self.name}: Guardrail validation failed: {e}")
@@ -1426,7 +1428,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  if self._using_custom_llm or self._openai_client is None:
  # For custom LLMs, we need to handle reflection differently
  # Use non-streaming to get complete JSON response
- reflection_response = self._chat_completion(messages, temperature=temperature, tools=None, stream=False, reasoning_steps=False)
+ reflection_response = self._chat_completion(messages, temperature=temperature, tools=None, stream=False, reasoning_steps=False, task_name=task_name, task_description=task_description, task_id=task_id)

  if not reflection_response or not reflection_response.choices:
  raise Exception("No response from reflection request")
@@ -1470,9 +1472,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  self.chat_history.append({"role": "assistant", "content": response_text})
  # Apply guardrail validation after satisfactory reflection
  try:
- validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+ validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
  # Execute callback after validation
- self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
+ self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
  return validated_response
  except Exception as e:
  logging.error(f"Agent {self.name}: Guardrail validation failed after reflection: {e}")
@@ -1488,9 +1490,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  self.chat_history.append({"role": "assistant", "content": response_text})
  # Apply guardrail validation after max reflections
  try:
- validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+ validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
  # Execute callback after validation
- self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
+ self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
  return validated_response
  except Exception as e:
  logging.error(f"Agent {self.name}: Guardrail validation failed after max reflections: {e}")
@@ -1503,7 +1505,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
  # For custom LLMs during reflection, always use non-streaming to ensure complete responses
  use_stream = self.stream if not self._using_custom_llm else False
- response = self._chat_completion(messages, temperature=temperature, tools=None, stream=use_stream)
+ response = self._chat_completion(messages, temperature=temperature, tools=None, stream=use_stream, task_name=task_name, task_description=task_description, task_id=task_id)
  response_text = response.choices[0].message.content.strip()
  reflection_count += 1
  continue # Continue the loop for more reflections
@@ -1538,6 +1540,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

  async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, task_name=None, task_description=None, task_id=None):
  """Async version of chat method with self-reflection support."""
+ # Reset the final display flag for each new conversation
+ self._final_display_shown = False
+
  # Log all parameter values when in debug mode
  if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
  param_info = {
@@ -1620,7 +1625,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

  # Apply guardrail validation for custom LLM response
  try:
- validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
+ validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools, task_name, task_description, task_id)
+ # Execute callback after validation
+ self._execute_callback_and_display(normalized_content, validated_response, time.time() - start_time, task_name, task_description, task_id)
  return validated_response
  except Exception as e:
  logging.error(f"Agent {self.name}: Guardrail validation failed for custom LLM: {e}")
@@ -1697,6 +1704,8 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
  total_time = time.time() - start_time
  logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
+ # Execute callback after tool completion
+ self._execute_callback_and_display(original_prompt, result, time.time() - start_time, task_name, task_description, task_id)
  return result
  elif output_json or output_pydantic:
  response = await self._openai_client.async_client.chat.completions.create(
@@ -1705,11 +1714,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  temperature=temperature,
  response_format={"type": "json_object"}
  )
- # Return the raw response
+ response_text = response.choices[0].message.content
  if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
  total_time = time.time() - start_time
  logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
- return response.choices[0].message.content
+ # Execute callback after JSON/Pydantic completion
+ self._execute_callback_and_display(original_prompt, response_text, time.time() - start_time, task_name, task_description, task_id)
+ return response_text
  else:
  response = await self._openai_client.async_client.chat.completions.create(
  model=self.llm,
@@ -1804,7 +1815,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

  # Apply guardrail validation for OpenAI client response
  try:
- validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+ validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
+ # Execute callback after validation
+ self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
  return validated_response
  except Exception as e:
  logging.error(f"Agent {self.name}: Guardrail validation failed for OpenAI client: {e}")
praisonaiagents/llm/llm.py
@@ -329,16 +329,29 @@ class LLM:
  # For Ollama, always generate summary when we have tool results
  # This prevents infinite loops caused by empty/minimal responses

- # Build tool summary efficiently
- summary_lines = ["Based on the tool execution results:"]
- for i, result in enumerate(tool_results):
+ # Build tool summary more naturally to match OpenAI-style responses
+ if len(tool_results) == 1:
+ # Single tool result - create natural response
+ result = tool_results[0]
  if isinstance(result, dict) and 'result' in result:
- function_name = result.get('function_name', 'Tool')
- summary_lines.append(f"- {function_name}: {result['result']}")
+ return str(result['result'])
  else:
- summary_lines.append(f"- Tool {i+1}: {result}")
-
- return "\n".join(summary_lines)
+ return str(result)
+ else:
+ # Multiple tool results - create coherent summary
+ summary_lines = []
+ for i, result in enumerate(tool_results):
+ if isinstance(result, dict) and 'result' in result:
+ function_name = result.get('function_name', 'Tool')
+ summary_lines.append(f"{function_name}: {result['result']}")
+ else:
+ summary_lines.append(f"Tool {i+1}: {result}")
+
+ # Create more natural summary text
+ if len(summary_lines) == 2:
+ return f"{summary_lines[0]}. {summary_lines[1]}."
+ else:
+ return "Based on the tool execution: " + ". ".join(summary_lines) + "."

  def _format_ollama_tool_result_message(self, function_name: str, tool_result: Any) -> Dict[str, str]:
  """
@@ -477,6 +490,49 @@ class LLM:
  logging.debug(f"[OLLAMA_FIX] Error validating arguments for {function_name}: {e}")
  return arguments

+ def _handle_ollama_sequential_logic(self, iteration_count: int, accumulated_tool_results: List[Any],
+ response_text: str, messages: List[Dict]) -> tuple:
+ """
+ Handle Ollama sequential tool execution logic to prevent premature tool summary generation.
+
+ This method implements the two-step process:
+ 1. After reaching threshold with tool results, add explicit final answer prompt
+ 2. Only generate tool summary if LLM still doesn't respond after explicit prompt
+
+ Args:
+ iteration_count: Current iteration count
+ accumulated_tool_results: List of tool results from all iterations
+ response_text: Current LLM response text
+ messages: Message history list to potentially modify
+
+ Returns:
+ tuple: (should_break, final_response_text, iteration_count)
+ - should_break: Whether to break the iteration loop
+ - final_response_text: Text to use as final response (None if continuing)
+ - iteration_count: Updated iteration count
+ """
+ if not (self._is_ollama_provider() and iteration_count >= self.OLLAMA_SUMMARY_ITERATION_THRESHOLD):
+ return False, None, iteration_count
+
+ # For Ollama: if we have meaningful tool results but empty responses,
+ # give LLM one final chance with explicit prompt for final answer
+ if accumulated_tool_results and iteration_count == self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
+ # Add explicit prompt asking for final answer
+ messages.append({
+ "role": "user",
+ "content": self.OLLAMA_FINAL_ANSWER_PROMPT
+ })
+ # Continue to next iteration to get the final response
+ iteration_count += 1
+ return False, None, iteration_count
+ else:
+ # If still no response after final answer prompt, generate summary
+ tool_summary = self._generate_ollama_tool_summary(accumulated_tool_results, response_text)
+ if tool_summary:
+ return True, tool_summary, iteration_count
+
+ return False, None, iteration_count
+
  def _needs_system_message_skip(self) -> bool:
  """Check if this model requires skipping system messages"""
  if not self.model:
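Note: _handle_ollama_sequential_logic is a two-step fallback: at the iteration threshold it first appends an explicit final-answer request, and only on the following pass does it synthesize a summary from the tool results. A condensed sketch of that decision, with placeholder threshold/prompt values and the provider check omitted:

from typing import Any, Dict, List, Optional, Tuple

SUMMARY_THRESHOLD = 2      # placeholder for LLM.OLLAMA_SUMMARY_ITERATION_THRESHOLD
FINAL_ANSWER_PROMPT = (    # placeholder wording, not the package's constant
    "Please provide your final answer based on the tool results above."
)

def ollama_sequential_step(iteration: int, tool_results: List[Any], response_text: str,
                           messages: List[Dict[str, str]]) -> Tuple[bool, Optional[str], int]:
    """Two-step fallback: first ask explicitly for a final answer, only then summarize."""
    if iteration < SUMMARY_THRESHOLD:
        return False, None, iteration                    # keep iterating normally
    if tool_results and iteration == SUMMARY_THRESHOLD:
        # Step 1: nudge the model once with an explicit final-answer request.
        messages.append({"role": "user", "content": FINAL_ANSWER_PROMPT})
        return False, None, iteration + 1
    # Step 2: the model still returned nothing useful, so fall back to a tool summary.
    summary = "; ".join(str(r) for r in tool_results) if tool_results else ""
    return (True, summary, iteration) if summary else (False, None, iteration)

# Step 1 fires on the threshold iteration, step 2 on the next one:
msgs: List[Dict[str, str]] = []
print(ollama_sequential_step(2, [{"result": 42}], "", msgs))  # (False, None, 3); msgs now has the nudge
print(ollama_sequential_step(3, [{"result": 42}], "", msgs))  # (True, "{'result': 42}", 3)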
@@ -1130,13 +1186,18 @@ class LLM:
  final_response_text = response_text.strip()
  break

+
  # Special handling for Ollama to prevent infinite loops
  # Only generate summary after multiple iterations to allow sequential execution
- if iteration_count >= self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
- tool_summary = self._generate_ollama_tool_summary(accumulated_tool_results, response_text)
- if tool_summary:
- final_response_text = tool_summary
- break
+ should_break, tool_summary_text, iteration_count = self._handle_ollama_sequential_logic(
+ iteration_count, accumulated_tool_results, response_text, messages
+ )
+ if should_break:
+ final_response_text = tool_summary_text
+ break
+ elif tool_summary_text is None and iteration_count > self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
+ # Continue iteration after adding final answer prompt
+ continue

  # Safety check: prevent infinite loops for any provider
  if iteration_count >= 5:
@@ -1151,6 +1212,17 @@
  continue
  else:
  # No tool calls, we're done with this iteration
+
+ # Special early stopping logic for Ollama when tool results are available
+ # Ollama often provides empty responses after successful tool execution
+ if (self._is_ollama_provider() and accumulated_tool_results and iteration_count >= 1 and
+ (not response_text or response_text.strip() == "")):
+ # Generate coherent response from tool results
+ tool_summary = self._generate_ollama_tool_summary(accumulated_tool_results, response_text)
+ if tool_summary:
+ final_response_text = tool_summary
+ break
+
  # If we've executed tools in previous iterations, this response contains the final answer
  if iteration_count > 0 and not final_response_text:
  final_response_text = response_text.strip() if response_text else ""
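Note: the added early-stop treats an empty Ollama response after successful tool execution as "finished" and substitutes the tool summary instead of looping again. A small predicate sketch of the same condition:

from typing import Any, List

def should_early_stop(is_ollama: bool, tool_results: List[Any], iteration: int, response_text: str) -> bool:
    """True when an empty Ollama response after tool execution should end the loop."""
    return bool(
        is_ollama
        and tool_results
        and iteration >= 1
        and (not response_text or response_text.strip() == "")
    )

assert should_early_stop(True, [{"result": 42}], 1, "   ") is True
assert should_early_stop(True, [{"result": 42}], 1, "The answer is 42.") is False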
@@ -1909,13 +1981,18 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  final_response_text = response_text.strip()
  break

+
  # Special handling for Ollama to prevent infinite loops
  # Only generate summary after multiple iterations to allow sequential execution
- if iteration_count >= self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
- tool_summary = self._generate_ollama_tool_summary(accumulated_tool_results, response_text)
- if tool_summary:
- final_response_text = tool_summary
- break
+ should_break, tool_summary_text, iteration_count = self._handle_ollama_sequential_logic(
+ iteration_count, accumulated_tool_results, response_text, messages
+ )
+ if should_break:
+ final_response_text = tool_summary_text
+ break
+ elif tool_summary_text is None and iteration_count > self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
+ # Continue iteration after adding final answer prompt
+ continue

  # Safety check: prevent infinite loops for any provider
  if iteration_count >= 5:
@@ -1930,6 +2007,17 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  continue
  else:
  # No tool calls, we're done with this iteration
+
+ # Special early stopping logic for Ollama when tool results are available
+ # Ollama often provides empty responses after successful tool execution
+ if (self._is_ollama_provider() and accumulated_tool_results and iteration_count >= 1 and
+ (not response_text or response_text.strip() == "")):
+ # Generate coherent response from tool results
+ tool_summary = self._generate_ollama_tool_summary(accumulated_tool_results, response_text)
+ if tool_summary:
+ final_response_text = tool_summary
+ break
+
  # If we've executed tools in previous iterations, this response contains the final answer
  if iteration_count > 0 and not final_response_text:
  final_response_text = response_text.strip()
@@ -2417,18 +2505,14 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  )

  if stream:
- if verbose:
- with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
- for chunk in litellm.completion(**completion_params):
- content = self._process_streaming_chunk(chunk)
- if content:
- response_text += content
- live.update(display_generating(response_text, start_time))
- else:
+ with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
  for chunk in litellm.completion(**completion_params):
  content = self._process_streaming_chunk(chunk)
  if content:
  response_text += content
+ live.update(display_generating(response_text, start_time))
+ if content:
+ response_text += content
  else:
  response = litellm.completion(**completion_params)
  response_text = response.choices[0].message.content.strip() if response.choices[0].message.content else ""
@@ -2517,18 +2601,14 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  )

  if stream:
- if verbose:
- with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
- async for chunk in await litellm.acompletion(**completion_params):
- content = self._process_streaming_chunk(chunk)
- if content:
- response_text += content
- live.update(display_generating(response_text, start_time))
- else:
+ with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
  async for chunk in await litellm.acompletion(**completion_params):
  content = self._process_streaming_chunk(chunk)
  if content:
  response_text += content
+ live.update(display_generating(response_text, start_time))
+ if content:
+ response_text += content
  else:
  response = await litellm.acompletion(**completion_params)
  response_text = response.choices[0].message.content.strip() if response.choices[0].message.content else ""
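Note: with the verbose-only gating removed, both the sync and async LiteLLM paths always wrap the token stream in a Rich Live display when stream=True. A minimal sketch of that pattern using litellm and rich directly; render_generating stands in for the package's display_generating helper, and the model name is only an example:

import time
import litellm
from rich.live import Live
from rich.panel import Panel

def render_generating(text: str, start_time: float) -> Panel:
    # Stand-in for praisonaiagents' display_generating helper.
    return Panel(text or "...", title=f"Generating ({time.time() - start_time:.1f}s)")

def stream_completion(model: str, prompt: str) -> str:
    start_time = time.time()
    response_text = ""
    with Live(render_generating("", start_time), refresh_per_second=4) as live:
        for chunk in litellm.completion(
            model=model,
            messages=[{"role": "user", "content": prompt}],
            stream=True,
        ):
            # Each streamed chunk carries an incremental delta of the reply.
            delta = chunk.choices[0].delta.content or ""
            if delta:
                response_text += delta
                live.update(render_generating(response_text, start_time))
    return response_text

# Example (requires credentials for the chosen model):
# print(stream_completion("gpt-4o-mini", "Say hello in one sentence."))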
praisonaiagents/telemetry/__init__.py
@@ -65,8 +65,8 @@ def _ensure_atexit():
  ])

  if not telemetry_disabled:
- # Register atexit handler to flush telemetry on exit
- atexit.register(lambda: get_telemetry().flush())
+ # Register atexit handler to properly shutdown telemetry on exit
+ atexit.register(lambda: get_telemetry().shutdown())
  _atexit_registered = True

  def _initialize_telemetry():

praisonaiagents/telemetry/telemetry.py
@@ -354,8 +354,8 @@ class TelemetryCollector:
  pass

  def stop(self):
- """Stop telemetry collection and flush data."""
- self.telemetry.flush()
+ """Stop telemetry collection and properly shutdown."""
+ self.telemetry.shutdown()

  def trace_agent_execution(self, agent_name: str, **attributes):
  """Compatibility method for agent execution tracking."""
praisonaiagents.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: praisonaiagents
- Version: 0.0.140
+ Version: 0.0.142
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Python: >=3.10

pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "praisonaiagents"
- version = "0.0.140"
+ version = "0.0.142"
  description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
  requires-python = ">=3.10"
  authors = [