praisonaiagents 0.0.137__py3-none-any.whl → 0.0.139__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1536,7 +1536,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  cleaned = cleaned[:-3].strip()
  return cleaned
 
- async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False):
+ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, task_name=None, task_description=None, task_id=None):
  """Async version of chat method with self-reflection support."""
  # Log all parameter values when in debug mode
  if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
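The hunk above widens the async chat entry point: achat now accepts optional task_name, task_description and task_id keywords, all defaulting to None so existing callers keep working. A minimal usage sketch, assuming an already-constructed Agent instance named agent; the task values below are illustrative, not from the package:

import asyncio

async def main(agent):
    # The metadata keywords are optional; omitting them gives the pre-0.0.139 behaviour.
    response = await agent.achat(
        "Summarise the findings",
        temperature=0.2,
        task_name="research-summary",        # hypothetical task name
        task_description="Summarise research findings for the user",
        task_id="task-001",                  # hypothetical identifier
    )
    print(response)

# asyncio.run(main(agent))  # requires a configured Agent and model credentials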
@@ -1944,7 +1944,11 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  prompt = task
  else:
  prompt = str(task)
- return await self.achat(prompt)
+ # Extract task info if available
+ task_name = getattr(task, 'name', None)
+ task_description = getattr(task, 'description', None)
+ task_id = getattr(task, 'id', None)
+ return await self.achat(prompt, task_name=task_name, task_description=task_description, task_id=task_id)
 
  async def execute_tool_async(self, function_name: str, arguments: Dict[str, Any]) -> Any:
  """Async version of execute_tool"""
@@ -2113,7 +2117,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  try:
  # Use async version if available, otherwise use sync version
  if asyncio.iscoroutinefunction(self.chat):
- response = await self.achat(query)
+ response = await self.achat(query, task_name=None, task_description=None, task_id=None)
  else:
  # Run sync function in a thread to avoid blocking
  loop = asyncio.get_event_loop()
@@ -2234,7 +2238,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  try:
  # Ensure self.achat is used as it's the async version and pass its tools
  if hasattr(self, 'achat') and asyncio.iscoroutinefunction(self.achat):
- response = await self.achat(prompt, tools=self.tools)
+ response = await self.achat(prompt, tools=self.tools, task_name=None, task_description=None, task_id=None)
  elif hasattr(self, 'chat'): # Fallback for synchronous chat
  loop = asyncio.get_event_loop()
  response = await loop.run_in_executor(None, lambda p=prompt: self.chat(p, tools=self.tools))
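Both call sites keep the same dispatch shape: use the coroutine entry point when one is available, otherwise push the blocking chat() onto an executor thread so the event loop is not starved. A self-contained sketch of that pattern, with a toy agent class standing in for the real one:

import asyncio

class ToyAgent:  # illustrative stand-in, not the package's Agent class
    def chat(self, prompt, tools=None):
        return f"sync answer to: {prompt}"

async def ask(agent, prompt):
    achat = getattr(agent, "achat", None)
    if achat is not None and asyncio.iscoroutinefunction(achat):
        # Async-native agents are awaited directly.
        return await achat(prompt)
    # Blocking agents run in a worker thread so the loop stays responsive.
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(None, lambda: agent.chat(prompt))

print(asyncio.run(ask(ToyAgent(), "hello")))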
@@ -362,14 +362,20 @@ Context:
  _get_multimodal_message(task_prompt, task.images),
  tools=tools,
  output_json=task.output_json,
- output_pydantic=task.output_pydantic
+ output_pydantic=task.output_pydantic,
+ task_name=task.name,
+ task_description=task.description,
+ task_id=task.id
  )
  else:
  agent_output = await executor_agent.achat(
  task_prompt,
  tools=tools,
  output_json=task.output_json,
- output_pydantic=task.output_pydantic
+ output_pydantic=task.output_pydantic,
+ task_name=task.name,
+ task_description=task.description,
+ task_id=task.id
  )
 
  if agent_output:
@@ -1138,7 +1144,7 @@ Context:
  try:
  # Use async version if available, otherwise use sync version
  if asyncio.iscoroutinefunction(agent_instance.chat):
- response = await agent_instance.achat(current_input)
+ response = await agent_instance.achat(current_input, task_name=None, task_description=None, task_id=None)
  else:
  # Run sync function in a thread to avoid blocking
  loop = asyncio.get_running_loop()
@@ -1294,7 +1300,7 @@ Context:
  try:
  logging.debug(f"Processing with agent: {agent_instance.name}")
  if hasattr(agent_instance, 'achat') and asyncio.iscoroutinefunction(agent_instance.achat):
- response = await agent_instance.achat(current_input, tools=agent_instance.tools)
+ response = await agent_instance.achat(current_input, tools=agent_instance.tools, task_name=None, task_description=None, task_id=None)
  elif hasattr(agent_instance, 'chat'): # Fallback to sync chat if achat not suitable
  loop = asyncio.get_running_loop()
  response = await loop.run_in_executor(None, lambda ci=current_input: agent_instance.chat(ci, tools=agent_instance.tools))
@@ -93,6 +93,9 @@ class LLM:
  # Ollama-specific prompt constants
  OLLAMA_TOOL_USAGE_PROMPT = "Please analyze the request and use the available tools to help answer the question. Start by identifying what information you need."
  OLLAMA_FINAL_ANSWER_PROMPT = "Based on the tool results above, please provide the final answer to the original question."
+
+ # Ollama iteration threshold for summary generation
+ OLLAMA_SUMMARY_ITERATION_THRESHOLD = 3
 
  def _log_llm_config(self, method_name: str, **config):
  """Centralized debug logging for LLM configuration and parameters.
@@ -323,9 +326,8 @@ class LLM:
  if not (self._is_ollama_provider() and tool_results):
  return None
 
- # If response is substantial, no summary needed
- if response_text and len(response_text.strip()) > OLLAMA_MIN_RESPONSE_LENGTH:
- return None
+ # For Ollama, always generate summary when we have tool results
+ # This prevents infinite loops caused by empty/minimal responses
 
  # Build tool summary efficiently
  summary_lines = ["Based on the tool execution results:"]
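The removed guard used to skip the summary whenever the model had already produced a longer reply; after this change a summary is always built once tool results exist for an Ollama provider. A hedged sketch of a builder with this shape; only the opening sentence comes from the diff, the per-result formatting is an assumption:

from typing import List, Optional

def generate_tool_summary(is_ollama_provider: bool, tool_results: List[object]) -> Optional[str]:
    # Only the Ollama path needs the fallback, and only when tools actually ran.
    if not (is_ollama_provider and tool_results):
        return None
    summary_lines = ["Based on the tool execution results:"]
    for index, result in enumerate(tool_results, start=1):
        summary_lines.append(f"{index}. {result}")
    return "\n".join(summary_lines)

print(generate_tool_summary(True, ["22°C and sunny", "no rain expected"]))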
@@ -827,6 +829,7 @@ class LLM:
  iteration_count = 0
  final_response_text = ""
  stored_reasoning_content = None # Store reasoning content from tool execution
+ accumulated_tool_results = [] # Store all tool results across iterations
 
  while iteration_count < max_iterations:
  try:
@@ -894,7 +897,13 @@ class LLM:
  message=original_prompt,
  response=response_content,
  markdown=markdown,
- generation_time=generation_time_val
+ generation_time=generation_time_val,
+ agent_name=agent_name,
+ agent_role=agent_role,
+ agent_tools=agent_tools,
+ task_name=task_name,
+ task_description=task_description,
+ task_id=task_id
  )
  callback_executed = True
 
@@ -963,7 +972,13 @@ class LLM:
  message=original_prompt,
  response=response_text,
  markdown=markdown,
- generation_time=time.time() - current_time
+ generation_time=time.time() - current_time,
+ agent_name=agent_name,
+ agent_role=agent_role,
+ agent_tools=agent_tools,
+ task_name=task_name,
+ task_description=task_description,
+ task_id=task_id
  )
  callback_executed = True
 
@@ -1017,7 +1032,13 @@ class LLM:
  message=original_prompt,
  response=response_text,
  markdown=markdown,
- generation_time=time.time() - current_time
+ generation_time=time.time() - current_time,
+ agent_name=agent_name,
+ agent_role=agent_role,
+ agent_tools=agent_tools,
+ task_name=task_name,
+ task_description=task_description,
+ task_id=task_id
  )
  callback_executed = True
 
@@ -1052,7 +1073,7 @@ class LLM:
  })
 
  should_continue = False
- tool_results = [] # Store all tool results
+ tool_results = [] # Store current iteration tool results
  for tool_call in tool_calls:
  # Handle both object and dict access patterns
  is_ollama = self._is_ollama_provider()
@@ -1066,6 +1087,7 @@ class LLM:
  tool_result = execute_tool_fn(function_name, arguments)
  logging.debug(f"[TOOL_EXEC_DEBUG] Tool execution result: {tool_result}")
  tool_results.append(tool_result) # Store the result
+ accumulated_tool_results.append(tool_result) # Accumulate across iterations
 
  if verbose:
  display_message = f"Agent {agent_name} called function '{function_name}' with arguments: {arguments}\n"
@@ -1103,15 +1125,25 @@ class LLM:
 
  # Check if the LLM provided a final answer alongside the tool calls
  # If response_text contains substantive content, treat it as the final answer
- if response_text and response_text.strip() and len(response_text.strip()) > 10:
+ if response_text and len(response_text.strip()) > 10:
  # LLM provided a final answer after tool execution, don't continue
  final_response_text = response_text.strip()
  break
 
  # Special handling for Ollama to prevent infinite loops
- tool_summary = self._generate_ollama_tool_summary(tool_results, response_text)
- if tool_summary:
- final_response_text = tool_summary
+ # Only generate summary after multiple iterations to allow sequential execution
+ if iteration_count >= self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
+ tool_summary = self._generate_ollama_tool_summary(accumulated_tool_results, response_text)
+ if tool_summary:
+ final_response_text = tool_summary
+ break
+
+ # Safety check: prevent infinite loops for any provider
+ if iteration_count >= 5:
+ if tool_results:
+ final_response_text = "Task completed successfully based on tool execution results."
+ else:
+ final_response_text = response_text.strip() if response_text else "Task completed."
  break
 
  # Otherwise, continue the loop to check if more tools are needed
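Two separate brakes are added here: the Ollama-specific summary now waits until the iteration counter reaches OLLAMA_SUMMARY_ITERATION_THRESHOLD (so sequential tool calls still get a chance to run), and a provider-agnostic cutoff at five iterations forces a final answer. A condensed sketch of that control flow, with the model call and tool execution stubbed out; the function below is illustrative, not the package's code:

OLLAMA_SUMMARY_ITERATION_THRESHOLD = 3  # mirrors the new class constant
HARD_ITERATION_CAP = 5                  # the provider-agnostic safety cutoff from the diff

def decide(iteration_count, response_text, tool_results, accumulated_tool_results, is_ollama):
    """Return the final answer if the loop should stop, otherwise None to keep iterating."""
    # A substantive model reply alongside tool calls ends the loop immediately.
    if response_text and len(response_text.strip()) > 10:
        return response_text.strip()
    # Ollama fallback: only summarise after enough iterations, using all accumulated results.
    if is_ollama and iteration_count >= OLLAMA_SUMMARY_ITERATION_THRESHOLD and accumulated_tool_results:
        return "Based on the tool execution results: " + "; ".join(map(str, accumulated_tool_results))
    # Generic safety net so no provider can loop forever.
    if iteration_count >= HARD_ITERATION_CAP:
        if tool_results:
            return "Task completed successfully based on tool execution results."
        return response_text.strip() if response_text else "Task completed."
    return None

print(decide(3, "", ["r1"], ["r1", "r2"], True))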
@@ -1175,7 +1207,13 @@ class LLM:
  message=original_prompt,
  response=response_content,
  markdown=markdown,
- generation_time=generation_time_val
+ generation_time=generation_time_val,
+ agent_name=agent_name,
+ agent_role=agent_role,
+ agent_tools=agent_tools,
+ task_name=task_name,
+ task_description=task_description,
+ task_id=task_id
  )
  callback_executed = True
 
@@ -1204,7 +1242,13 @@ class LLM:
  message=original_prompt,
  response=response_text,
  markdown=markdown,
- generation_time=time.time() - start_time
+ generation_time=time.time() - start_time,
+ agent_name=agent_name,
+ agent_role=agent_role,
+ agent_tools=agent_tools,
+ task_name=task_name,
+ task_description=task_description,
+ task_id=task_id
  )
  callback_executed = True
  return response_text
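Every interaction callback invocation in these hunks now forwards the agent and task metadata alongside the existing fields. A hedged sketch of what a consuming callback might look like; how it gets registered with the library is outside this diff, and the logger body below is purely illustrative:

def interaction_logger(message=None, response=None, markdown=None, generation_time=None,
                       agent_name=None, agent_role=None, agent_tools=None,
                       task_name=None, task_description=None, task_id=None, **kwargs):
    # Accepting **kwargs keeps the callback tolerant of fields added in later releases.
    header = f"[agent={agent_name!r} role={agent_role!r} task={task_name!r} task_id={task_id!r}]"
    timing = f"{generation_time:.2f}s" if generation_time is not None else "n/a"
    print(header, "took", timing)
    print("prompt:  ", message)
    print("response:", response)

# Example invocation mirroring the keyword set the library now forwards:
interaction_logger(message="What is 2+2?", response="4", markdown=True, generation_time=0.42,
                   agent_name="MathAgent", agent_role="calculator", agent_tools=[],
                   task_name="demo-task", task_description="toy example", task_id="t-42")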
@@ -1224,7 +1268,13 @@ class LLM:
  message=original_prompt,
  response=response_text,
  markdown=markdown,
- generation_time=time.time() - start_time
+ generation_time=time.time() - start_time,
+ agent_name=agent_name,
+ agent_role=agent_role,
+ agent_tools=agent_tools,
+ task_name=task_name,
+ task_description=task_description,
+ task_id=task_id
  )
  callback_executed = True
 
@@ -1543,6 +1593,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  iteration_count = 0
  final_response_text = ""
  stored_reasoning_content = None # Store reasoning content from tool execution
+ accumulated_tool_results = [] # Store all tool results across iterations
 
  while iteration_count < max_iterations:
  response_text = ""
@@ -1713,7 +1764,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  "tool_calls": serializable_tool_calls
  })
 
- tool_results = [] # Store all tool results
+ tool_results = [] # Store current iteration tool results
  for tool_call in tool_calls:
  # Handle both object and dict access patterns
  is_ollama = self._is_ollama_provider()
@@ -1725,6 +1776,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
 
  tool_result = await execute_tool_fn(function_name, arguments)
  tool_results.append(tool_result) # Store the result
+ accumulated_tool_results.append(tool_result) # Accumulate across iterations
 
  if verbose:
  display_message = f"Agent {agent_name} called function '{function_name}' with arguments: {arguments}\n"
@@ -1852,15 +1904,25 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
 
  # Check if the LLM provided a final answer alongside the tool calls
  # If response_text contains substantive content, treat it as the final answer
- if response_text and response_text.strip() and len(response_text.strip()) > 10:
+ if response_text and len(response_text.strip()) > 10:
  # LLM provided a final answer after tool execution, don't continue
  final_response_text = response_text.strip()
  break
 
  # Special handling for Ollama to prevent infinite loops
- tool_summary = self._generate_ollama_tool_summary(tool_results, response_text)
- if tool_summary:
- final_response_text = tool_summary
+ # Only generate summary after multiple iterations to allow sequential execution
+ if iteration_count >= self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
+ tool_summary = self._generate_ollama_tool_summary(accumulated_tool_results, response_text)
+ if tool_summary:
+ final_response_text = tool_summary
+ break
+
+ # Safety check: prevent infinite loops for any provider
+ if iteration_count >= 5:
+ if tool_results:
+ final_response_text = "Task completed successfully based on tool execution results."
+ else:
+ final_response_text = response_text.strip() if response_text else "Task completed."
  break
 
  # Continue the loop to check if more tools are needed
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: praisonaiagents
- Version: 0.0.137
+ Version: 0.0.139
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Python: >=3.10
@@ -3,12 +3,12 @@ praisonaiagents/approval.py,sha256=UJ4OhfihpFGR5CAaMphqpSvqdZCHi5w2MGw1MByZ1FQ,9
  praisonaiagents/main.py,sha256=b5dKlkf6NMeumSzixreHB9ui90f8YMAi5r1fCbTpQVw,17225
  praisonaiagents/session.py,sha256=FHWButPBaFGA4x1U_2gImroQChHnFy231_aAa_n5KOQ,20364
  praisonaiagents/agent/__init__.py,sha256=FkjW6f3EU8heQ9tvctfLbOWV9_dOXmS1PcFNgcStns8,403
- praisonaiagents/agent/agent.py,sha256=1m9s6irqenYatuoGHHS1DX913rm9QpjcJHJjvPH6_QE,123338
+ praisonaiagents/agent/agent.py,sha256=oig3vY0mlsTmxJl6QyvWWGA4srPjFlRVR6Dd8CseFhc,123765
  praisonaiagents/agent/handoff.py,sha256=Saq0chqfvC6Zf5UbXvmctybbehqnotrXn72JsS-76Q0,13099
  praisonaiagents/agent/image_agent.py,sha256=Bbwg_h3qhjhG7gMH8sdcQXhcOFgE_wSvcdhtqH5f2UM,9145
  praisonaiagents/agent/router_agent.py,sha256=a_b6w5Ti05gvK80uKGMIcT14fiCTKv8rCQPCWAUfIiE,12713
  praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
- praisonaiagents/agents/agents.py,sha256=WfzlnwiqiEdU6z-6j_Xp0LyhIApKNj0G6L0Hlr418yE,64420
+ praisonaiagents/agents/agents.py,sha256=DkBgdE6hK22qkJjufYXNGDXTM1vsAv-nf4RyhPX6sEs,64768
  praisonaiagents/agents/autoagents.py,sha256=v5pJfTgHnFzG5K2gHwfRA0nZ7Ikptir6hUNvOZ--E44,20777
  praisonaiagents/guardrails/__init__.py,sha256=HA8zhp-KRHTxo0194MUwXOUJjPyjOu7E3d7xUIKYVVY,310
  praisonaiagents/guardrails/guardrail_result.py,sha256=2K1WIYRyT_s1H6vBGa-7HEHzXCFIyZXZVY4f0hnQyWc,1352
@@ -17,7 +17,7 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
  praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
  praisonaiagents/knowledge/knowledge.py,sha256=-di_h9HxXQfAhTMMerhK16tfw8DtUndp44TGkBOzkZs,15539
  praisonaiagents/llm/__init__.py,sha256=tHvWq5mv4K4MhWr0s6rqox8UnJ5RK0kXhYuD40WkZQA,1747
- praisonaiagents/llm/llm.py,sha256=-4wADWAR5DHn9Ja-XgVOc7wl2Wh60KWDHEE7aGqvx3I,128441
+ praisonaiagents/llm/llm.py,sha256=7TMGPTjNvYVgUEvjtnpSImAI6pM_rQiin9j_DqYCN24,132172
  praisonaiagents/llm/model_capabilities.py,sha256=cxOvZcjZ_PIEpUYKn3S2FMyypfOSfbGpx4vmV7Y5vhI,3967
  praisonaiagents/llm/model_router.py,sha256=Jy2pShlkLxqXF3quz-MRB3-6L9vaUSgUrf2YJs_Tsg0,13995
  praisonaiagents/llm/openai_client.py,sha256=EgWjkDjVpnLKCp1gBFjccDGyqR1anOcSYJYCo45fuEI,46046
@@ -57,7 +57,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
  praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
  praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
  praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
- praisonaiagents-0.0.137.dist-info/METADATA,sha256=a_1VYrge8bQjC5ysjayhFMbgB_2BoHAmrFabr3tR7zI,1673
- praisonaiagents-0.0.137.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- praisonaiagents-0.0.137.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
- praisonaiagents-0.0.137.dist-info/RECORD,,
+ praisonaiagents-0.0.139.dist-info/METADATA,sha256=2kOFXS7fONbsMZdzkrvPwvicvQqtnuLFurYH4G8YIZ0,1673
+ praisonaiagents-0.0.139.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ praisonaiagents-0.0.139.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+ praisonaiagents-0.0.139.dist-info/RECORD,,