praisonaiagents 0.0.125__tar.gz → 0.0.127__tar.gz

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/PKG-INFO +1 -1
  2. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/agent/agent.py +2 -1
  3. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/llm/llm.py +217 -207
  4. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/llm/openai_client.py +4 -78
  5. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents.egg-info/PKG-INFO +1 -1
  6. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents.egg-info/SOURCES.txt +1 -0
  7. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/pyproject.toml +1 -1
  8. praisonaiagents-0.0.127/tests/test_fix_comprehensive.py +75 -0
  9. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/README.md +0 -0
  10. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/__init__.py +0 -0
  11. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/agent/__init__.py +0 -0
  12. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/agent/handoff.py +0 -0
  13. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/agent/image_agent.py +0 -0
  14. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/agents/__init__.py +0 -0
  15. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/agents/agents.py +0 -0
  16. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/agents/autoagents.py +0 -0
  17. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/approval.py +0 -0
  18. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/guardrails/__init__.py +0 -0
  19. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/guardrails/guardrail_result.py +0 -0
  20. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/guardrails/llm_guardrail.py +0 -0
  21. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/knowledge/__init__.py +0 -0
  22. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/knowledge/chunking.py +0 -0
  23. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/knowledge/knowledge.py +0 -0
  24. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/llm/__init__.py +0 -0
  25. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/llm/model_capabilities.py +0 -0
  26. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/main.py +0 -0
  27. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/mcp/__init__.py +0 -0
  28. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/mcp/mcp.py +0 -0
  29. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/mcp/mcp_http_stream.py +0 -0
  30. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/mcp/mcp_sse.py +0 -0
  31. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/memory/__init__.py +0 -0
  32. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/memory/memory.py +0 -0
  33. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/process/__init__.py +0 -0
  34. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/process/process.py +0 -0
  35. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/session.py +0 -0
  36. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/task/__init__.py +0 -0
  37. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/task/task.py +0 -0
  38. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/telemetry/__init__.py +0 -0
  39. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/telemetry/integration.py +0 -0
  40. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/telemetry/telemetry.py +0 -0
  41. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/tools/README.md +0 -0
  42. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/tools/__init__.py +0 -0
  43. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/tools/arxiv_tools.py +0 -0
  44. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/tools/calculator_tools.py +0 -0
  45. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/tools/csv_tools.py +0 -0
  46. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/tools/duckdb_tools.py +0 -0
  47. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
  48. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/tools/excel_tools.py +0 -0
  49. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/tools/file_tools.py +0 -0
  50. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/tools/json_tools.py +0 -0
  51. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/tools/newspaper_tools.py +0 -0
  52. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/tools/pandas_tools.py +0 -0
  53. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/tools/python_tools.py +0 -0
  54. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/tools/searxng_tools.py +0 -0
  55. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/tools/shell_tools.py +0 -0
  56. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/tools/spider_tools.py +0 -0
  57. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/tools/test.py +0 -0
  58. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/tools/tools.py +0 -0
  59. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/tools/train/data/generatecot.py +0 -0
  60. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/tools/wikipedia_tools.py +0 -0
  61. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/tools/xml_tools.py +0 -0
  62. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/tools/yaml_tools.py +0 -0
  63. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents/tools/yfinance_tools.py +0 -0
  64. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents.egg-info/dependency_links.txt +0 -0
  65. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents.egg-info/requires.txt +0 -0
  66. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/praisonaiagents.egg-info/top_level.txt +0 -0
  67. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/setup.cfg +0 -0
  68. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/tests/test-graph-memory.py +0 -0
  69. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/tests/test.py +0 -0
  70. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/tests/test_handoff_compatibility.py +0 -0
  71. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/tests/test_http_stream_basic.py +0 -0
  72. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/tests/test_ollama_async_fix.py +0 -0
  73. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/tests/test_ollama_fix.py +0 -0
  74. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/tests/test_posthog_fixed.py +0 -0
  75. {praisonaiagents-0.0.125 → praisonaiagents-0.0.127}/tests/test_validation_feedback.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: praisonaiagents
- Version: 0.0.125
+ Version: 0.0.127
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Python: >=3.10
@@ -1235,7 +1235,8 @@ Your Goal: {self.goal}"""
  agent_role=self.role,
  agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in (tools if tools is not None else self.tools)],
  execute_tool_fn=self.execute_tool, # Pass tool execution function
- reasoning_steps=reasoning_steps
+ reasoning_steps=reasoning_steps,
+ stream=stream # Pass the stream parameter from chat method
  )

  self.chat_history.append({"role": "assistant", "content": response_text})
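Note on the agent.py hunk above: it forwards the caller's stream preference from the chat path into the LLM wrapper instead of dropping it. A minimal sketch of that pattern, using hypothetical names (SimpleAgent, fake_llm) rather than the package's real internals:

# Sketch of forwarding a stream flag down a call chain.
# SimpleAgent and fake_llm are illustrative stand-ins, not the real API.

class SimpleAgent:
    def __init__(self, llm_call):
        self._llm_call = llm_call  # any callable accepting stream=...

    def chat(self, prompt: str, stream: bool = True) -> str:
        # Before the fix the flag stopped here; now it is passed downstream.
        return self._llm_call(prompt, stream=stream)

def fake_llm(prompt: str, stream: bool = False) -> str:
    mode = "streaming" if stream else "non-streaming"
    return f"[{mode}] {prompt}"

agent = SimpleAgent(fake_llm)
print(agent.chat("hello", stream=False))  # -> [non-streaming] hello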
@@ -680,6 +680,7 @@ class LLM:
  max_iterations = 10 # Prevent infinite loops
  iteration_count = 0
  final_response_text = ""
+ stored_reasoning_content = None # Store reasoning content from tool execution

  while iteration_count < max_iterations:
  try:
@@ -771,7 +772,7 @@
  if formatted_tools and self._supports_streaming_tools():
  tool_calls = self._process_tool_calls_from_stream(delta, tool_calls)

- response_text = response_text.strip()
+ response_text = response_text.strip() if response_text else ""

  # Create a mock final_response with the captured data
  final_response = {
@@ -857,8 +858,6 @@
  iteration_count += 1
  continue

- # If we reach here, no more tool calls needed - get final response
- # Make one more call to get the final summary response
  # Special handling for Ollama models that don't automatically process tool results
  ollama_handled = False
  ollama_params = self._handle_ollama_model(response_text, tool_results, messages, original_prompt)
@@ -905,7 +904,7 @@

  # Set flag to indicate Ollama was handled
  ollama_handled = True
- final_response_text = response_text.strip()
+ final_response_text = response_text.strip() if response_text else ""
  logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")

  # Display the response if we got one
@@ -918,15 +917,23 @@
  console=console
  )

- # Return the final response after processing Ollama's follow-up
+ # Update messages and continue the loop instead of returning
  if final_response_text:
- return final_response_text
+ # Update messages with the response to maintain conversation context
+ messages.append({
+ "role": "assistant",
+ "content": final_response_text
+ })
+ # Continue the loop to check if more tools are needed
+ iteration_count += 1
+ continue
  else:
  logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")

- # If reasoning_steps is True and we haven't handled Ollama already, do a single non-streaming call
+ # Handle reasoning_steps after tool execution if not already handled by Ollama
  if reasoning_steps and not ollama_handled:
- resp = litellm.completion(
+ # Make a non-streaming call to capture reasoning content
+ reasoning_resp = litellm.completion(
  **self._build_completion_params(
  messages=messages,
  temperature=temperature,
@@ -934,89 +941,28 @@
  **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
  )
  )
- reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
- response_text = resp["choices"][0]["message"]["content"]
+ reasoning_content = reasoning_resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
+ response_text = reasoning_resp["choices"][0]["message"]["content"]

- # Optionally display reasoning if present
- if verbose and reasoning_content:
- display_interaction(
- original_prompt,
- f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
- markdown=markdown,
- generation_time=time.time() - start_time,
- console=console
- )
- else:
- display_interaction(
- original_prompt,
- response_text,
- markdown=markdown,
- generation_time=time.time() - start_time,
- console=console
- )
-
- # Otherwise do the existing streaming approach if not already handled
- elif not ollama_handled:
- # Get response after tool calls
- if stream:
- # Streaming approach
- if verbose:
- with Live(display_generating("", current_time), console=console, refresh_per_second=4) as live:
- final_response_text = ""
- for chunk in litellm.completion(
- **self._build_completion_params(
- messages=messages,
- tools=formatted_tools,
- temperature=temperature,
- stream=True,
- **kwargs
- )
- ):
- if chunk and chunk.choices and chunk.choices[0].delta.content:
- content = chunk.choices[0].delta.content
- final_response_text += content
- live.update(display_generating(final_response_text, current_time))
- else:
- final_response_text = ""
- for chunk in litellm.completion(
- **self._build_completion_params(
- messages=messages,
- tools=formatted_tools,
- temperature=temperature,
- stream=True,
- **kwargs
- )
- ):
- if chunk and chunk.choices and chunk.choices[0].delta.content:
- final_response_text += chunk.choices[0].delta.content
- else:
- # Non-streaming approach
- resp = litellm.completion(
- **self._build_completion_params(
- messages=messages,
- tools=formatted_tools,
- temperature=temperature,
- stream=False,
- **kwargs
- )
- )
- final_response_text = resp.get("choices", [{}])[0].get("message", {}).get("content", "") or ""
+ # Store reasoning content for later use
+ if reasoning_content:
+ stored_reasoning_content = reasoning_content

- final_response_text = final_response_text.strip()
-
- # Display final response
- if verbose:
- display_interaction(
- original_prompt,
- final_response_text,
- markdown=markdown,
- generation_time=time.time() - start_time,
- console=console
- )
+ # Update messages with the response
+ messages.append({
+ "role": "assistant",
+ "content": response_text
+ })

- return final_response_text
+ # After tool execution, continue the loop to check if more tools are needed
+ # instead of immediately trying to get a final response
+ iteration_count += 1
+ continue
  else:
  # No tool calls, we're done with this iteration
+ # If we've executed tools in previous iterations, this response contains the final answer
+ if iteration_count > 0:
+ final_response_text = response_text.strip() if response_text else ""
  break

  except Exception as e:
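For orientation, the llm.py hunks above replace the old "run tools once, then make a separate final-summary call" flow with a single loop: after each round of tool execution the results are appended to messages and the model is queried again, so it can chain further tool calls until it replies in plain text or the iteration cap is hit. A self-contained sketch of that control flow with a stubbed model and tool registry (illustrative names, not the package's actual implementation):

# Sketch of the iterative tool-calling loop the diff moves to.
# call_model and TOOLS are stand-ins for litellm and the package's
# execute_tool machinery.
import json

TOOLS = {"get_stock_price": lambda company: "100",
         "multiply": lambda a, b: str(int(a) * int(b))}

def call_model(messages):
    """Stub model: requests tools until it has seen two tool results."""
    tool_turns = sum(1 for m in messages if m["role"] == "tool")
    if tool_turns == 0:
        return {"tool_calls": [{"name": "get_stock_price", "args": {"company": "Google"}}]}
    if tool_turns == 1:
        return {"tool_calls": [{"name": "multiply", "args": {"a": "100", "b": "2"}}]}
    return {"content": "The doubled stock price is 200."}

def run(prompt, max_iterations=10):
    messages = [{"role": "user", "content": prompt}]
    for _ in range(max_iterations):
        reply = call_model(messages)
        tool_calls = reply.get("tool_calls")
        if not tool_calls:                       # no more tools -> final answer
            return reply.get("content", "")
        messages.append({"role": "assistant", "content": json.dumps(tool_calls)})
        for call in tool_calls:                  # execute each requested tool
            result = TOOLS[call["name"]](**call["args"])
            messages.append({"role": "tool", "content": result})
        # loop continues: the model sees the results and may call more tools
    return ""

print(run("Google price times 2"))  # -> The doubled stock price is 200.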
@@ -1029,15 +975,29 @@ class LLM:

  # No tool calls were made in this iteration, return the response
  if verbose:
- display_interaction(
- original_prompt,
- response_text,
- markdown=markdown,
- generation_time=time.time() - start_time,
- console=console
- )
+ # If we have stored reasoning content from tool execution, display it
+ if stored_reasoning_content:
+ display_interaction(
+ original_prompt,
+ f"Reasoning:\n{stored_reasoning_content}\n\nAnswer:\n{response_text}",
+ markdown=markdown,
+ generation_time=time.time() - start_time,
+ console=console
+ )
+ else:
+ display_interaction(
+ original_prompt,
+ response_text,
+ markdown=markdown,
+ generation_time=time.time() - start_time,
+ console=console
+ )
+
+ response_text = response_text.strip() if response_text else ""

- response_text = response_text.strip()
+ # Return reasoning content if reasoning_steps is True and we have it
+ if reasoning_steps and stored_reasoning_content:
+ return stored_reasoning_content

  # Handle output formatting
  if output_json or output_pydantic:
@@ -1053,8 +1013,8 @@
  display_interaction(original_prompt, response_text, markdown=markdown,
  generation_time=time.time() - start_time, console=console)
  # Return reasoning content if reasoning_steps is True
- if reasoning_steps and reasoning_content:
- return reasoning_content
+ if reasoning_steps and stored_reasoning_content:
+ return stored_reasoning_content
  return response_text

  # Handle self-reflection loop
@@ -1195,7 +1155,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  if chunk and chunk.choices and chunk.choices[0].delta.content:
  response_text += chunk.choices[0].delta.content

- response_text = response_text.strip()
+ response_text = response_text.strip() if response_text else ""
  continue

  except json.JSONDecodeError:
@@ -1317,118 +1277,126 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  # Format tools for LiteLLM using the shared helper
  formatted_tools = self._format_tools_for_litellm(tools)

- response_text = ""
- if reasoning_steps:
- # Non-streaming call to capture reasoning
- resp = await litellm.acompletion(
- **self._build_completion_params(
- messages=messages,
+ # Initialize variables for iteration loop
+ max_iterations = 10 # Prevent infinite loops
+ iteration_count = 0
+ final_response_text = ""
+ stored_reasoning_content = None # Store reasoning content from tool execution
+
+ while iteration_count < max_iterations:
+ response_text = ""
+ reasoning_content = None
+ tool_calls = []
+
+ if reasoning_steps and iteration_count == 0:
+ # Non-streaming call to capture reasoning
+ resp = await litellm.acompletion(
+ **self._build_completion_params(
+ messages=messages,
  temperature=temperature,
  stream=False, # force non-streaming
  **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
  )
- )
- reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
- response_text = resp["choices"][0]["message"]["content"]
-
- if verbose and reasoning_content:
- display_interaction(
- "Initial reasoning:",
- f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
- markdown=markdown,
- generation_time=time.time() - start_time,
- console=console
- )
- elif verbose:
- display_interaction(
- "Initial response:",
- response_text,
- markdown=markdown,
- generation_time=time.time() - start_time,
- console=console
  )
- else:
- # Determine if we should use streaming based on tool support
- use_streaming = stream
- if formatted_tools and not self._supports_streaming_tools():
- # Provider doesn't support streaming with tools, use non-streaming
- use_streaming = False
-
- if use_streaming:
- # Streaming approach (with or without tools)
- tool_calls = []
+ reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
+ response_text = resp["choices"][0]["message"]["content"]

- if verbose:
- async for chunk in await litellm.acompletion(
- **self._build_completion_params(
- messages=messages,
- temperature=temperature,
- stream=True,
- tools=formatted_tools,
- **kwargs
- )
- ):
- if chunk and chunk.choices and chunk.choices[0].delta:
- delta = chunk.choices[0].delta
- response_text, tool_calls = self._process_stream_delta(
- delta, response_text, tool_calls, formatted_tools
+ if verbose and reasoning_content:
+ display_interaction(
+ "Initial reasoning:",
+ f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
+ markdown=markdown,
+ generation_time=time.time() - start_time,
+ console=console
+ )
+ elif verbose:
+ display_interaction(
+ "Initial response:",
+ response_text,
+ markdown=markdown,
+ generation_time=time.time() - start_time,
+ console=console
+ )
+ else:
+ # Determine if we should use streaming based on tool support
+ use_streaming = stream
+ if formatted_tools and not self._supports_streaming_tools():
+ # Provider doesn't support streaming with tools, use non-streaming
+ use_streaming = False
+
+ if use_streaming:
+ # Streaming approach (with or without tools)
+ tool_calls = []
+
+ if verbose:
+ async for chunk in await litellm.acompletion(
+ **self._build_completion_params(
+ messages=messages,
+ temperature=temperature,
+ stream=True,
+ tools=formatted_tools,
+ **kwargs
  )
- if delta.content:
- print("\033[K", end="\r")
- print(f"Generating... {time.time() - start_time:.1f}s", end="\r")
+ ):
+ if chunk and chunk.choices and chunk.choices[0].delta:
+ delta = chunk.choices[0].delta
+ response_text, tool_calls = self._process_stream_delta(
+ delta, response_text, tool_calls, formatted_tools
+ )
+ if delta.content:
+ print("\033[K", end="\r")
+ print(f"Generating... {time.time() - start_time:.1f}s", end="\r")

+ else:
+ # Non-verbose streaming
+ async for chunk in await litellm.acompletion(
+ **self._build_completion_params(
+ messages=messages,
+ temperature=temperature,
+ stream=True,
+ tools=formatted_tools,
+ **kwargs
+ )
+ ):
+ if chunk and chunk.choices and chunk.choices[0].delta:
+ delta = chunk.choices[0].delta
+ if delta.content:
+ response_text += delta.content
+
+ # Capture tool calls from streaming chunks if provider supports it
+ if formatted_tools and self._supports_streaming_tools():
+ tool_calls = self._process_tool_calls_from_stream(delta, tool_calls)
+
+ response_text = response_text.strip() if response_text else ""
+
+ # We already have tool_calls from streaming if supported
+ # No need for a second API call!
  else:
- # Non-verbose streaming
- async for chunk in await litellm.acompletion(
+ # Non-streaming approach (when tools require it or streaming is disabled)
+ tool_response = await litellm.acompletion(
  **self._build_completion_params(
  messages=messages,
  temperature=temperature,
- stream=True,
+ stream=False,
  tools=formatted_tools,
- **kwargs
+ **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
  )
- ):
- if chunk and chunk.choices and chunk.choices[0].delta:
- delta = chunk.choices[0].delta
- if delta.content:
- response_text += delta.content
-
- # Capture tool calls from streaming chunks if provider supports it
- if formatted_tools and self._supports_streaming_tools():
- tool_calls = self._process_tool_calls_from_stream(delta, tool_calls)
-
- response_text = response_text.strip()
-
- # We already have tool_calls from streaming if supported
- # No need for a second API call!
- else:
- # Non-streaming approach (when tools require it or streaming is disabled)
- tool_response = await litellm.acompletion(
- **self._build_completion_params(
- messages=messages,
- temperature=temperature,
- stream=False,
- tools=formatted_tools,
- **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
- )
- )
- response_text = tool_response.choices[0].message.get("content", "")
- tool_calls = tool_response.choices[0].message.get("tool_calls", [])
-
- if verbose:
- # Display the complete response at once
- display_interaction(
- original_prompt,
- response_text,
- markdown=markdown,
- generation_time=time.time() - start_time,
- console=console
  )
+ response_text = tool_response.choices[0].message.get("content", "")
+ tool_calls = tool_response.choices[0].message.get("tool_calls", [])
+
+ if verbose:
+ # Display the complete response at once
+ display_interaction(
+ original_prompt,
+ response_text,
+ markdown=markdown,
+ generation_time=time.time() - start_time,
+ console=console
+ )

- # Now handle tools if we have them (either from streaming or non-streaming)
- if tools and execute_tool_fn and tool_calls:
-
- if tool_calls:
+ # Now handle tools if we have them (either from streaming or non-streaming)
+ if tools and execute_tool_fn and tool_calls:
  # Convert tool_calls to a serializable format for all providers
  serializable_tool_calls = self._serialize_tool_calls(tool_calls)
  messages.append({
@@ -1509,9 +1477,16 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  console=console
  )

- # Return the final response after processing Ollama's follow-up
+ # Store the response for potential final return
  if final_response_text:
- return final_response_text
+ # Update messages with the response to maintain conversation context
+ messages.append({
+ "role": "assistant",
+ "content": final_response_text
+ })
+ # Continue the loop to check if more tools are needed
+ iteration_count += 1
+ continue
  else:
  logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")

@@ -1576,7 +1551,28 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  if chunk and chunk.choices and chunk.choices[0].delta.content:
  response_text += chunk.choices[0].delta.content

- response_text = response_text.strip()
+ response_text = response_text.strip() if response_text else ""
+
+ # After tool execution, update messages and continue the loop
+ if response_text:
+ messages.append({
+ "role": "assistant",
+ "content": response_text
+ })
+
+ # Store reasoning content if captured
+ if reasoning_steps and reasoning_content:
+ stored_reasoning_content = reasoning_content
+
+ # Continue the loop to check if more tools are needed
+ iteration_count += 1
+ continue
+ else:
+ # No tool calls, we're done with this iteration
+ # If we've executed tools in previous iterations, this response contains the final answer
+ if iteration_count > 0:
+ final_response_text = response_text.strip()
+ break

  # Handle output formatting
  if output_json or output_pydantic:
@@ -1588,13 +1584,27 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  return response_text

  if not self_reflect:
+ # Use final_response_text if we went through tool iterations
+ display_text = final_response_text if final_response_text else response_text
+
+ # Display with stored reasoning content if available
  if verbose:
- display_interaction(original_prompt, response_text, markdown=markdown,
- generation_time=time.time() - start_time, console=console)
- # Return reasoning content if reasoning_steps is True
- if reasoning_steps and reasoning_content:
- return reasoning_content
- return response_text
+ if stored_reasoning_content:
+ display_interaction(
+ original_prompt,
+ f"Reasoning:\n{stored_reasoning_content}\n\nAnswer:\n{display_text}",
+ markdown=markdown,
+ generation_time=time.time() - start_time,
+ console=console
+ )
+ else:
+ display_interaction(original_prompt, display_text, markdown=markdown,
+ generation_time=time.time() - start_time, console=console)
+
+ # Return reasoning content if reasoning_steps is True and we have it
+ if reasoning_steps and stored_reasoning_content:
+ return stored_reasoning_content
+ return display_text

  # Handle self-reflection
  reflection_prompt = f"""
@@ -1798,7 +1808,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

  # Check if the response is just a JSON tool call
  try:
- json_response = json.loads(response_text.strip())
+ json_response = json.loads(response_text.strip() if response_text else "{}")
  if not (('name' in json_response or 'function' in json_response) and
  not any(word in response_text.lower() for word in ['summary', 'option', 'result', 'found'])):
  return None
@@ -2056,7 +2066,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  response_text += content
  else:
  response = litellm.completion(**completion_params)
- response_text = response.choices[0].message.content.strip()
+ response_text = response.choices[0].message.content.strip() if response.choices[0].message.content else ""

  if verbose:
  display_interaction(
@@ -2067,7 +2077,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  console=console or self.console
  )

- return response_text.strip()
+ return response_text.strip() if response_text else ""

  except Exception as error:
  display_error(f"Error in response: {str(error)}")
@@ -2144,7 +2154,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  response_text += content
  else:
  response = await litellm.acompletion(**completion_params)
- response_text = response.choices[0].message.content.strip()
+ response_text = response.choices[0].message.content.strip() if response.choices[0].message.content else ""

  if verbose:
  display_interaction(
@@ -2155,7 +2165,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  console=console or self.console
  )

- return response_text.strip()
+ return response_text.strip() if response_text else ""

  except Exception as error:
  display_error(f"Error in response_async: {str(error)}")
@@ -890,45 +890,8 @@ class OpenAIClient:
  "content": results_str
  })

- # Check if we should continue (for tools like sequential thinking)
- should_continue = False
- for tool_call in tool_calls:
- # Handle both ToolCall dataclass and OpenAI object
- if isinstance(tool_call, ToolCall):
- function_name = tool_call.function["name"]
- arguments = json.loads(tool_call.function["arguments"])
- else:
- function_name = tool_call.function.name
- arguments = json.loads(tool_call.function.arguments)
-
- # For sequential thinking tool, check if nextThoughtNeeded is True
- if function_name == "sequentialthinking" and arguments.get("nextThoughtNeeded", False):
- should_continue = True
- break
-
- if not should_continue:
- # Get final response after tool calls
- if stream:
- final_response = self.process_stream_response(
- messages=messages,
- model=model,
- temperature=temperature,
- tools=formatted_tools,
- start_time=start_time,
- console=console,
- display_fn=display_fn,
- reasoning_steps=reasoning_steps,
- **kwargs
- )
- else:
- final_response = self.create_completion(
- messages=messages,
- model=model,
- temperature=temperature,
- stream=False,
- **kwargs
- )
- break
+ # Continue the loop to allow more tool calls
+ # The model will see tool results and can make additional tool calls

  iteration_count += 1
  else:
@@ -1067,45 +1030,8 @@ class OpenAIClient:
  "content": results_str
  })

- # Check if we should continue (for tools like sequential thinking)
- should_continue = False
- for tool_call in tool_calls:
- # Handle both ToolCall dataclass and OpenAI object
- if isinstance(tool_call, ToolCall):
- function_name = tool_call.function["name"]
- arguments = json.loads(tool_call.function["arguments"])
- else:
- function_name = tool_call.function.name
- arguments = json.loads(tool_call.function.arguments)
-
- # For sequential thinking tool, check if nextThoughtNeeded is True
- if function_name == "sequentialthinking" and arguments.get("nextThoughtNeeded", False):
- should_continue = True
- break
-
- if not should_continue:
- # Get final response after tool calls
- if stream:
- final_response = await self.process_stream_response_async(
- messages=messages,
- model=model,
- temperature=temperature,
- tools=formatted_tools,
- start_time=start_time,
- console=console,
- display_fn=display_fn,
- reasoning_steps=reasoning_steps,
- **kwargs
- )
- else:
- final_response = await self.acreate_completion(
- messages=messages,
- model=model,
- temperature=temperature,
- stream=False,
- **kwargs
- )
- break
+ # Continue the loop to allow more tool calls
+ # The model will see tool results and can make additional tool calls

  iteration_count += 1
  else:
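In openai_client.py the special case that kept looping only for the sequentialthinking tool is removed from both the sync and async paths: tool results are now always appended and the loop simply runs again, ending when a turn arrives without tool calls or when the iteration cap is reached. A small sketch of that termination behaviour with a mocked completion function (hypothetical names, not the real OpenAIClient):

# Sketch: the loop ends when a turn has no tool calls, or at max_iterations.
# MockTurn and mock_completion are illustrative stand-ins.
from dataclasses import dataclass, field

@dataclass
class MockTurn:
    content: str = ""
    tool_calls: list = field(default_factory=list)

def mock_completion(history):
    # Request a tool on the first turn only, then answer in plain text.
    if not any(h.get("role") == "tool" for h in history):
        return MockTurn(tool_calls=[{"name": "lookup", "args": {}}])
    return MockTurn(content="done")

def run(max_iterations=10):
    history = [{"role": "user", "content": "start"}]
    iteration_count = 0
    while iteration_count < max_iterations:
        turn = mock_completion(history)
        if not turn.tool_calls:
            return turn.content        # natural exit: no more tool calls
        history.append({"role": "tool", "content": "lookup-result"})
        iteration_count += 1           # always continue after tool results
    return ""                          # safety exit: iteration cap reached

print(run())  # -> done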
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: praisonaiagents
- Version: 0.0.125
+ Version: 0.0.127
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Python: >=3.10
@@ -64,6 +64,7 @@ praisonaiagents/tools/yfinance_tools.py
  praisonaiagents/tools/train/data/generatecot.py
  tests/test-graph-memory.py
  tests/test.py
+ tests/test_fix_comprehensive.py
  tests/test_handoff_compatibility.py
  tests/test_http_stream_basic.py
  tests/test_ollama_async_fix.py
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "praisonaiagents"
- version = "0.0.125"
+ version = "0.0.127"
  description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
  requires-python = ">=3.10"
  authors = [
@@ -0,0 +1,75 @@
+ """
+ Test script to verify the fix for sequential tool calling.
+ The agent should:
+ 1. Call get_stock_price to get Google's stock price (100)
+ 2. Call multiply to multiply 100 by 2
+ 3. Return the final result (200)
+ """
+
+ from praisonaiagents import Agent
+
+ def get_stock_price(company_name: str) -> str:
+ """
+ Get the stock price of a company
+
+ Args:
+ company_name (str): The name of the company
+
+ Returns:
+ str: The stock price of the company
+ """
+ print(f"[Tool Called] get_stock_price({company_name})")
+ return f"The stock price of {company_name} is 100"
+
+ def multiply(a: int, b: int) -> int:
+ """
+ Multiply two numbers
+
+ Args:
+ a (int): First number
+ b (int): Second number
+
+ Returns:
+ int: Product of a and b
+ """
+ print(f"[Tool Called] multiply({a}, {b})")
+ return a * b
+
+ # Test with Gemini
+ print("=" * 60)
+ print("Testing with Gemini model")
+ print("=" * 60)
+
+ agent_gemini = Agent(
+ instructions="You are a helpful assistant. You can use the tools provided to you to help the user. When asked to multiply a stock price, first get the stock price, then multiply it.",
+ llm="gemini/gemini-2.5-pro",
+ tools=[get_stock_price, multiply],
+ verbose=True
+ )
+
+ result_gemini = agent_gemini.start("what is the stock price of Google? multiply the Google stock price with 2")
+ print(f"\nFinal Result (Gemini): {result_gemini}")
+
+ # Test with GPT-4
+ print("\n" + "=" * 60)
+ print("Testing with GPT-4 model")
+ print("=" * 60)
+
+ agent_gpt4 = Agent(
+ instructions="You are a helpful assistant. You can use the tools provided to you to help the user. When asked to multiply a stock price, first get the stock price, then multiply it.",
+ llm="gpt-4o",
+ tools=[get_stock_price, multiply],
+ verbose=True
+ )
+
+ result_gpt4 = agent_gpt4.start("what is the stock price of Google? multiply the Google stock price with 2")
+ print(f"\nFinal Result (GPT-4): {result_gpt4}")
+
+ # Verify results
+ print("\n" + "=" * 60)
+ print("Test Results Summary")
+ print("=" * 60)
+ print(f"Gemini result contains '200': {'200' in str(result_gemini) if result_gemini else False}")
+ print(f"GPT-4 result contains '200': {'200' in str(result_gpt4) if result_gpt4 else False}")
+ print(f"Gemini returned empty: {not result_gemini or result_gemini == ''}")
+ print(f"GPT-4 returned empty: {not result_gpt4 or result_gpt4 == ''}")