praisonaiagents 0.0.136__tar.gz → 0.0.138__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/PKG-INFO +1 -1
  2. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/llm/llm.py +92 -8
  3. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents.egg-info/PKG-INFO +1 -1
  4. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/pyproject.toml +1 -1
  5. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/tests/test_fix_comprehensive.py +1 -1
  6. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/README.md +0 -0
  7. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/__init__.py +0 -0
  8. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/agent/__init__.py +0 -0
  9. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/agent/agent.py +0 -0
  10. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/agent/handoff.py +0 -0
  11. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/agent/image_agent.py +0 -0
  12. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/agent/router_agent.py +0 -0
  13. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/agents/__init__.py +0 -0
  14. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/agents/agents.py +0 -0
  15. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/agents/autoagents.py +0 -0
  16. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/approval.py +0 -0
  17. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/guardrails/__init__.py +0 -0
  18. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/guardrails/guardrail_result.py +0 -0
  19. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/guardrails/llm_guardrail.py +0 -0
  20. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/knowledge/__init__.py +0 -0
  21. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/knowledge/chunking.py +0 -0
  22. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/knowledge/knowledge.py +0 -0
  23. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/llm/__init__.py +0 -0
  24. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/llm/model_capabilities.py +0 -0
  25. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/llm/model_router.py +0 -0
  26. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/llm/openai_client.py +0 -0
  27. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/main.py +0 -0
  28. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/mcp/__init__.py +0 -0
  29. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/mcp/mcp.py +0 -0
  30. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/mcp/mcp_http_stream.py +0 -0
  31. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/mcp/mcp_sse.py +0 -0
  32. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/memory/__init__.py +0 -0
  33. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/memory/memory.py +0 -0
  34. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/process/__init__.py +0 -0
  35. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/process/process.py +0 -0
  36. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/session.py +0 -0
  37. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/task/__init__.py +0 -0
  38. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/task/task.py +0 -0
  39. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/telemetry/__init__.py +0 -0
  40. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/telemetry/integration.py +0 -0
  41. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/telemetry/telemetry.py +0 -0
  42. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/tools/README.md +0 -0
  43. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/tools/__init__.py +0 -0
  44. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/tools/arxiv_tools.py +0 -0
  45. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/tools/calculator_tools.py +0 -0
  46. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/tools/csv_tools.py +0 -0
  47. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/tools/duckdb_tools.py +0 -0
  48. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
  49. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/tools/excel_tools.py +0 -0
  50. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/tools/file_tools.py +0 -0
  51. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/tools/json_tools.py +0 -0
  52. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/tools/newspaper_tools.py +0 -0
  53. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/tools/pandas_tools.py +0 -0
  54. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/tools/python_tools.py +0 -0
  55. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/tools/searxng_tools.py +0 -0
  56. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/tools/shell_tools.py +0 -0
  57. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/tools/spider_tools.py +0 -0
  58. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/tools/test.py +0 -0
  59. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/tools/tools.py +0 -0
  60. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/tools/train/data/generatecot.py +0 -0
  61. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/tools/wikipedia_tools.py +0 -0
  62. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/tools/xml_tools.py +0 -0
  63. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/tools/yaml_tools.py +0 -0
  64. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents/tools/yfinance_tools.py +0 -0
  65. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents.egg-info/SOURCES.txt +0 -0
  66. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents.egg-info/dependency_links.txt +0 -0
  67. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents.egg-info/requires.txt +0 -0
  68. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/praisonaiagents.egg-info/top_level.txt +0 -0
  69. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/setup.cfg +0 -0
  70. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/tests/test-graph-memory.py +0 -0
  71. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/tests/test.py +0 -0
  72. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/tests/test_handoff_compatibility.py +0 -0
  73. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/tests/test_http_stream_basic.py +0 -0
  74. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/tests/test_llm_self_reflection_direct.py +0 -0
  75. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/tests/test_ollama_async_fix.py +0 -0
  76. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/tests/test_ollama_fix.py +0 -0
  77. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/tests/test_ollama_sequential_fix.py +0 -0
  78. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/tests/test_posthog_fixed.py +0 -0
  79. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/tests/test_self_reflection_comprehensive.py +0 -0
  80. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/tests/test_self_reflection_fix_simple.py +0 -0
  81. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/tests/test_self_reflection_fix_verification.py +0 -0
  82. {praisonaiagents-0.0.136 → praisonaiagents-0.0.138}/tests/test_validation_feedback.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: praisonaiagents
3
- Version: 0.0.136
3
+ Version: 0.0.138
4
4
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
5
5
  Author: Mervin Praison
6
6
  Requires-Python: >=3.10
@@ -302,6 +302,42 @@ class LLM:
302
302
 
303
303
  return False
304
304
 
305
+ def _generate_ollama_tool_summary(self, tool_results: List[Any], response_text: str) -> Optional[str]:
306
+ """
307
+ Generate a summary from tool results for Ollama to prevent infinite loops.
308
+
309
+ This prevents infinite loops where Ollama provides an empty response after a
310
+ tool call, expecting the user to prompt for a summary.
311
+
312
+ Args:
313
+ tool_results: The list of results from tool execution.
314
+ response_text: The text response from the LLM.
315
+
316
+ Returns:
317
+ A summary string if conditions are met, otherwise None.
318
+ """
319
+ # Constant for minimal response length check
320
+ OLLAMA_MIN_RESPONSE_LENGTH = 10
321
+
322
+ # Only generate summary for Ollama with tool results
323
+ if not (self._is_ollama_provider() and tool_results):
324
+ return None
325
+
326
+ # If response is substantial, no summary needed
327
+ if response_text and len(response_text.strip()) > OLLAMA_MIN_RESPONSE_LENGTH:
328
+ return None
329
+
330
+ # Build tool summary efficiently
331
+ summary_lines = ["Based on the tool execution results:"]
332
+ for i, result in enumerate(tool_results):
333
+ if isinstance(result, dict) and 'result' in result:
334
+ function_name = result.get('function_name', 'Tool')
335
+ summary_lines.append(f"- {function_name}: {result['result']}")
336
+ else:
337
+ summary_lines.append(f"- Tool {i+1}: {result}")
338
+
339
+ return "\n".join(summary_lines)
340
+
305
341
  def _format_ollama_tool_result_message(self, function_name: str, tool_result: Any) -> Dict[str, str]:
306
342
  """
307
343
  Format tool result message for Ollama provider.
@@ -858,7 +894,13 @@ class LLM:
858
894
  message=original_prompt,
859
895
  response=response_content,
860
896
  markdown=markdown,
861
- generation_time=generation_time_val
897
+ generation_time=generation_time_val,
898
+ agent_name=agent_name,
899
+ agent_role=agent_role,
900
+ agent_tools=agent_tools,
901
+ task_name=task_name,
902
+ task_description=task_description,
903
+ task_id=task_id
862
904
  )
863
905
  callback_executed = True
864
906
 
@@ -927,7 +969,13 @@ class LLM:
927
969
  message=original_prompt,
928
970
  response=response_text,
929
971
  markdown=markdown,
930
- generation_time=time.time() - current_time
972
+ generation_time=time.time() - current_time,
973
+ agent_name=agent_name,
974
+ agent_role=agent_role,
975
+ agent_tools=agent_tools,
976
+ task_name=task_name,
977
+ task_description=task_description,
978
+ task_id=task_id
931
979
  )
932
980
  callback_executed = True
933
981
 
@@ -981,7 +1029,13 @@ class LLM:
981
1029
  message=original_prompt,
982
1030
  response=response_text,
983
1031
  markdown=markdown,
984
- generation_time=time.time() - current_time
1032
+ generation_time=time.time() - current_time,
1033
+ agent_name=agent_name,
1034
+ agent_role=agent_role,
1035
+ agent_tools=agent_tools,
1036
+ task_name=task_name,
1037
+ task_description=task_description,
1038
+ task_id=task_id
985
1039
  )
986
1040
  callback_executed = True
987
1041
 
@@ -1072,13 +1126,19 @@ class LLM:
1072
1126
  final_response_text = response_text.strip()
1073
1127
  break
1074
1128
 
1129
+ # Special handling for Ollama to prevent infinite loops
1130
+ tool_summary = self._generate_ollama_tool_summary(tool_results, response_text)
1131
+ if tool_summary:
1132
+ final_response_text = tool_summary
1133
+ break
1134
+
1075
1135
  # Otherwise, continue the loop to check if more tools are needed
1076
1136
  iteration_count += 1
1077
1137
  continue
1078
1138
  else:
1079
1139
  # No tool calls, we're done with this iteration
1080
1140
  # If we've executed tools in previous iterations, this response contains the final answer
1081
- if iteration_count > 0:
1141
+ if iteration_count > 0 and not final_response_text:
1082
1142
  final_response_text = response_text.strip() if response_text else ""
1083
1143
  break
1084
1144
 
@@ -1133,7 +1193,13 @@ class LLM:
1133
1193
  message=original_prompt,
1134
1194
  response=response_content,
1135
1195
  markdown=markdown,
1136
- generation_time=generation_time_val
1196
+ generation_time=generation_time_val,
1197
+ agent_name=agent_name,
1198
+ agent_role=agent_role,
1199
+ agent_tools=agent_tools,
1200
+ task_name=task_name,
1201
+ task_description=task_description,
1202
+ task_id=task_id
1137
1203
  )
1138
1204
  callback_executed = True
1139
1205
 
@@ -1162,7 +1228,13 @@ class LLM:
1162
1228
  message=original_prompt,
1163
1229
  response=response_text,
1164
1230
  markdown=markdown,
1165
- generation_time=time.time() - start_time
1231
+ generation_time=time.time() - start_time,
1232
+ agent_name=agent_name,
1233
+ agent_role=agent_role,
1234
+ agent_tools=agent_tools,
1235
+ task_name=task_name,
1236
+ task_description=task_description,
1237
+ task_id=task_id
1166
1238
  )
1167
1239
  callback_executed = True
1168
1240
  return response_text
@@ -1182,7 +1254,13 @@ class LLM:
1182
1254
  message=original_prompt,
1183
1255
  response=response_text,
1184
1256
  markdown=markdown,
1185
- generation_time=time.time() - start_time
1257
+ generation_time=time.time() - start_time,
1258
+ agent_name=agent_name,
1259
+ agent_role=agent_role,
1260
+ agent_tools=agent_tools,
1261
+ task_name=task_name,
1262
+ task_description=task_description,
1263
+ task_id=task_id
1186
1264
  )
1187
1265
  callback_executed = True
1188
1266
 
@@ -1815,13 +1893,19 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1815
1893
  final_response_text = response_text.strip()
1816
1894
  break
1817
1895
 
1896
+ # Special handling for Ollama to prevent infinite loops
1897
+ tool_summary = self._generate_ollama_tool_summary(tool_results, response_text)
1898
+ if tool_summary:
1899
+ final_response_text = tool_summary
1900
+ break
1901
+
1818
1902
  # Continue the loop to check if more tools are needed
1819
1903
  iteration_count += 1
1820
1904
  continue
1821
1905
  else:
1822
1906
  # No tool calls, we're done with this iteration
1823
1907
  # If we've executed tools in previous iterations, this response contains the final answer
1824
- if iteration_count > 0:
1908
+ if iteration_count > 0 and not final_response_text:
1825
1909
  final_response_text = response_text.strip()
1826
1910
  break
1827
1911
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: praisonaiagents
3
- Version: 0.0.136
3
+ Version: 0.0.138
4
4
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
5
5
  Author: Mervin Praison
6
6
  Requires-Python: >=3.10
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "praisonaiagents"
7
- version = "0.0.136"
7
+ version = "0.0.138"
8
8
  description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
9
9
  requires-python = ">=3.10"
10
10
  authors = [
@@ -57,7 +57,7 @@ print("=" * 60)
57
57
 
58
58
  agent_gpt4 = Agent(
59
59
  instructions="You are a helpful assistant. You can use the tools provided to you to help the user. When asked to multiply a stock price, first get the stock price, then multiply it.",
60
- llm="gpt-4o",
60
+ llm="gpt-4o-mini",
61
61
  tools=[get_stock_price, multiply],
62
62
  verbose=True
63
63
  )