praisonaiagents-0.0.136-py3-none-any.whl → praisonaiagents-0.0.137-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -302,6 +302,42 @@ class LLM:
 
             return False
 
+    def _generate_ollama_tool_summary(self, tool_results: List[Any], response_text: str) -> Optional[str]:
+        """
+        Generate a summary from tool results for Ollama to prevent infinite loops.
+
+        This prevents infinite loops where Ollama provides an empty response after a
+        tool call, expecting the user to prompt for a summary.
+
+        Args:
+            tool_results: The list of results from tool execution.
+            response_text: The text response from the LLM.
+
+        Returns:
+            A summary string if conditions are met, otherwise None.
+        """
+        # Constant for minimal response length check
+        OLLAMA_MIN_RESPONSE_LENGTH = 10
+
+        # Only generate summary for Ollama with tool results
+        if not (self._is_ollama_provider() and tool_results):
+            return None
+
+        # If response is substantial, no summary needed
+        if response_text and len(response_text.strip()) > OLLAMA_MIN_RESPONSE_LENGTH:
+            return None
+
+        # Build tool summary efficiently
+        summary_lines = ["Based on the tool execution results:"]
+        for i, result in enumerate(tool_results):
+            if isinstance(result, dict) and 'result' in result:
+                function_name = result.get('function_name', 'Tool')
+                summary_lines.append(f"- {function_name}: {result['result']}")
+            else:
+                summary_lines.append(f"- Tool {i+1}: {result}")
+
+        return "\n".join(summary_lines)
+
     def _format_ollama_tool_result_message(self, function_name: str, tool_result: Any) -> Dict[str, str]:
         """
         Format tool result message for Ollama provider.
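For illustration, the added helper can be exercised on its own. The sketch below is a minimal, hypothetical reproduction of the same logic: the summarize_tool_results function and the sample tool result are invented for this example, and the instance-level _is_ollama_provider() check is omitted because it depends on the configured provider.

    from typing import Any, List, Optional

    # Same threshold as the OLLAMA_MIN_RESPONSE_LENGTH constant in the added method.
    OLLAMA_MIN_RESPONSE_LENGTH = 10

    def summarize_tool_results(tool_results: List[Any], response_text: str) -> Optional[str]:
        # No tool results, or an already substantial response: nothing to summarise.
        if not tool_results:
            return None
        if response_text and len(response_text.strip()) > OLLAMA_MIN_RESPONSE_LENGTH:
            return None
        lines = ["Based on the tool execution results:"]
        for i, result in enumerate(tool_results):
            if isinstance(result, dict) and 'result' in result:
                lines.append(f"- {result.get('function_name', 'Tool')}: {result['result']}")
            else:
                lines.append(f"- Tool {i + 1}: {result}")
        return "\n".join(lines)

    # An empty model reply plus one (hypothetical) tool result still yields a final answer.
    print(summarize_tool_results([{'function_name': 'get_weather', 'result': '22C and sunny'}], ""))
    # Based on the tool execution results:
    # - get_weather: 22C and sunny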
@@ -1072,13 +1108,19 @@ class LLM:
                         final_response_text = response_text.strip()
                         break
 
+                    # Special handling for Ollama to prevent infinite loops
+                    tool_summary = self._generate_ollama_tool_summary(tool_results, response_text)
+                    if tool_summary:
+                        final_response_text = tool_summary
+                        break
+
                     # Otherwise, continue the loop to check if more tools are needed
                     iteration_count += 1
                     continue
                 else:
                     # No tool calls, we're done with this iteration
                     # If we've executed tools in previous iterations, this response contains the final answer
-                    if iteration_count > 0:
+                    if iteration_count > 0 and not final_response_text:
                         final_response_text = response_text.strip() if response_text else ""
                         break
 
@@ -1815,13 +1857,19 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         final_response_text = response_text.strip()
                         break
 
+                    # Special handling for Ollama to prevent infinite loops
+                    tool_summary = self._generate_ollama_tool_summary(tool_results, response_text)
+                    if tool_summary:
+                        final_response_text = tool_summary
+                        break
+
                     # Continue the loop to check if more tools are needed
                     iteration_count += 1
                     continue
                 else:
                     # No tool calls, we're done with this iteration
                     # If we've executed tools in previous iterations, this response contains the final answer
-                    if iteration_count > 0:
+                    if iteration_count > 0 and not final_response_text:
                         final_response_text = response_text.strip()
                         break
 
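Both hunks apply the same two-part change to the two tool-calling loops in llm.py: after tool execution, a near-empty Ollama response is replaced by the generated summary and the loop exits, and the no-tool-call branch now adopts the latest response only when final_response_text has not already been set. The sketch below is a simplified, hypothetical view of that control flow; resolve_final_text and its per-iteration tuples are invented for illustration and are not the actual loop.

    from typing import List, Optional, Tuple

    def resolve_final_text(iterations: List[Tuple[str, Optional[str]]]) -> str:
        # Each tuple is (response_text, tool_summary) for one loop iteration.
        final_response_text = ""
        for iteration_count, (response_text, tool_summary) in enumerate(iterations):
            if tool_summary:
                # Ollama summary path: take the summary as the final answer and stop.
                final_response_text = tool_summary
                break
            if iteration_count > 0 and not final_response_text:
                # Later iteration without tool calls: adopt the response only if nothing
                # has been set yet, so an empty reply cannot clobber an earlier answer.
                final_response_text = response_text.strip() if response_text else ""
                break
        return final_response_text

    # One tool-call round with an empty model reply still ends with a final answer.
    print(resolve_final_text([("", "Based on the tool execution results:\n- Tool 1: ok")]))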
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.136
+Version: 0.0.137
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Python: >=3.10
@@ -17,7 +17,7 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
 praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
 praisonaiagents/knowledge/knowledge.py,sha256=-di_h9HxXQfAhTMMerhK16tfw8DtUndp44TGkBOzkZs,15539
 praisonaiagents/llm/__init__.py,sha256=tHvWq5mv4K4MhWr0s6rqox8UnJ5RK0kXhYuD40WkZQA,1747
-praisonaiagents/llm/llm.py,sha256=trPfrRKfwG4z2bx26gAbdfDrAIIqU7Vdqq3pAyCqO-g,126191
+praisonaiagents/llm/llm.py,sha256=-4wADWAR5DHn9Ja-XgVOc7wl2Wh60KWDHEE7aGqvx3I,128441
 praisonaiagents/llm/model_capabilities.py,sha256=cxOvZcjZ_PIEpUYKn3S2FMyypfOSfbGpx4vmV7Y5vhI,3967
 praisonaiagents/llm/model_router.py,sha256=Jy2pShlkLxqXF3quz-MRB3-6L9vaUSgUrf2YJs_Tsg0,13995
 praisonaiagents/llm/openai_client.py,sha256=EgWjkDjVpnLKCp1gBFjccDGyqR1anOcSYJYCo45fuEI,46046
@@ -57,7 +57,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.136.dist-info/METADATA,sha256=5ytqaBnL9hmNkwr-14WcmFsCjncMIoYNGOjn3E26Uws,1673
-praisonaiagents-0.0.136.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-praisonaiagents-0.0.136.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.136.dist-info/RECORD,,
+praisonaiagents-0.0.137.dist-info/METADATA,sha256=a_1VYrge8bQjC5ysjayhFMbgB_2BoHAmrFabr3tR7zI,1673
+praisonaiagents-0.0.137.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+praisonaiagents-0.0.137.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.137.dist-info/RECORD,,