praisonaiagents 0.0.126-py3-none-any.whl → 0.0.127-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
@@ -772,7 +772,7 @@ class LLM:
  if formatted_tools and self._supports_streaming_tools():
  tool_calls = self._process_tool_calls_from_stream(delta, tool_calls)
 
- response_text = response_text.strip()
+ response_text = response_text.strip() if response_text else ""
 
  # Create a mock final_response with the captured data
  final_response = {
@@ -904,7 +904,7 @@ class LLM:
 
  # Set flag to indicate Ollama was handled
  ollama_handled = True
- final_response_text = response_text.strip()
+ final_response_text = response_text.strip() if response_text else ""
  logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
 
  # Display the response if we got one
@@ -962,7 +962,7 @@ class LLM:
  # No tool calls, we're done with this iteration
  # If we've executed tools in previous iterations, this response contains the final answer
  if iteration_count > 0:
- final_response_text = response_text.strip()
+ final_response_text = response_text.strip() if response_text else ""
  break
 
  except Exception as e:
@@ -993,7 +993,7 @@ class LLM:
  console=console
  )
 
- response_text = response_text.strip()
+ response_text = response_text.strip() if response_text else ""
 
  # Return reasoning content if reasoning_steps is True and we have it
  if reasoning_steps and stored_reasoning_content:
@@ -1155,7 +1155,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  if chunk and chunk.choices and chunk.choices[0].delta.content:
  response_text += chunk.choices[0].delta.content
 
- response_text = response_text.strip()
+ response_text = response_text.strip() if response_text else ""
  continue
 
  except json.JSONDecodeError:
@@ -1367,7 +1367,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  if formatted_tools and self._supports_streaming_tools():
  tool_calls = self._process_tool_calls_from_stream(delta, tool_calls)
 
- response_text = response_text.strip()
+ response_text = response_text.strip() if response_text else ""
 
  # We already have tool_calls from streaming if supported
  # No need for a second API call!
@@ -1551,7 +1551,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  if chunk and chunk.choices and chunk.choices[0].delta.content:
  response_text += chunk.choices[0].delta.content
 
- response_text = response_text.strip()
+ response_text = response_text.strip() if response_text else ""
 
  # After tool execution, update messages and continue the loop
  if response_text:
@@ -1808,7 +1808,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
 
  # Check if the response is just a JSON tool call
  try:
- json_response = json.loads(response_text.strip())
+ json_response = json.loads(response_text.strip() if response_text else "{}")
  if not (('name' in json_response or 'function' in json_response) and
  not any(word in response_text.lower() for word in ['summary', 'option', 'result', 'found'])):
  return None
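This hunk differs slightly from the others: because the stripped text is fed straight into json.loads, and the empty string is not valid JSON, the fallback here is the literal "{}", which parses to an empty dict instead of raising json.JSONDecodeError. A minimal standalone sketch of the distinction (not the library's code; parse_tool_json is an illustrative name):

    import json

    def parse_tool_json(response_text):
        # json.loads("") raises JSONDecodeError and json.loads(None) raises
        # TypeError, so a None/empty response falls back to "{}" -> {}.
        return json.loads(response_text.strip() if response_text else "{}")

    assert parse_tool_json(None) == {}
    assert parse_tool_json('{"name": "search"}') == {"name": "search"}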
@@ -2066,7 +2066,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  response_text += content
  else:
  response = litellm.completion(**completion_params)
- response_text = response.choices[0].message.content.strip()
+ response_text = response.choices[0].message.content.strip() if response.choices[0].message.content else ""
 
  if verbose:
  display_interaction(
@@ -2077,7 +2077,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  console=console or self.console
  )
 
- return response_text.strip()
+ return response_text.strip() if response_text else ""
 
  except Exception as error:
  display_error(f"Error in response: {str(error)}")
@@ -2154,7 +2154,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  response_text += content
  else:
  response = await litellm.acompletion(**completion_params)
- response_text = response.choices[0].message.content.strip()
+ response_text = response.choices[0].message.content.strip() if response.choices[0].message.content else ""
 
  if verbose:
  display_interaction(
@@ -2165,7 +2165,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  console=console or self.console
  )
 
- return response_text.strip()
+ return response_text.strip() if response_text else ""
 
  except Exception as error:
  display_error(f"Error in response_async: {str(error)}")
@@ -890,45 +890,8 @@ class OpenAIClient:
  "content": results_str
  })
 
- # Check if we should continue (for tools like sequential thinking)
- should_continue = False
- for tool_call in tool_calls:
- # Handle both ToolCall dataclass and OpenAI object
- if isinstance(tool_call, ToolCall):
- function_name = tool_call.function["name"]
- arguments = json.loads(tool_call.function["arguments"])
- else:
- function_name = tool_call.function.name
- arguments = json.loads(tool_call.function.arguments)
-
- # For sequential thinking tool, check if nextThoughtNeeded is True
- if function_name == "sequentialthinking" and arguments.get("nextThoughtNeeded", False):
- should_continue = True
- break
-
- if not should_continue:
- # Get final response after tool calls
- if stream:
- final_response = self.process_stream_response(
- messages=messages,
- model=model,
- temperature=temperature,
- tools=formatted_tools,
- start_time=start_time,
- console=console,
- display_fn=display_fn,
- reasoning_steps=reasoning_steps,
- **kwargs
- )
- else:
- final_response = self.create_completion(
- messages=messages,
- model=model,
- temperature=temperature,
- stream=False,
- **kwargs
- )
- break
+ # Continue the loop to allow more tool calls
+ # The model will see tool results and can make additional tool calls
 
  iteration_count += 1
  else:
@@ -1067,45 +1030,8 @@ class OpenAIClient:
  "content": results_str
  })
 
- # Check if we should continue (for tools like sequential thinking)
- should_continue = False
- for tool_call in tool_calls:
- # Handle both ToolCall dataclass and OpenAI object
- if isinstance(tool_call, ToolCall):
- function_name = tool_call.function["name"]
- arguments = json.loads(tool_call.function["arguments"])
- else:
- function_name = tool_call.function.name
- arguments = json.loads(tool_call.function.arguments)
-
- # For sequential thinking tool, check if nextThoughtNeeded is True
- if function_name == "sequentialthinking" and arguments.get("nextThoughtNeeded", False):
- should_continue = True
- break
-
- if not should_continue:
- # Get final response after tool calls
- if stream:
- final_response = await self.process_stream_response_async(
- messages=messages,
- model=model,
- temperature=temperature,
- tools=formatted_tools,
- start_time=start_time,
- console=console,
- display_fn=display_fn,
- reasoning_steps=reasoning_steps,
- **kwargs
- )
- else:
- final_response = await self.acreate_completion(
- messages=messages,
- model=model,
- temperature=temperature,
- stream=False,
- **kwargs
- )
- break
+ # Continue the loop to allow more tool calls
+ # The model will see tool results and can make additional tool calls
 
  iteration_count += 1
  else:
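Both OpenAIClient hunks (the sync and the async path) make the same behavioral change: rather than special-casing the sequentialthinking tool and breaking out after one round of tool calls to fetch a final response, the loop now always appends the tool results and iterates again, letting the model itself decide whether to issue further calls. The loop then exits through the no-tool-calls branch (the else: shown above) or the iteration cap. A rough sketch of the resulting control flow, where chat() and run_tool() are hypothetical stand-ins for the client's real helpers:

    def tool_call_loop(chat, run_tool, messages, max_iterations=10):
        # chat(messages) -> assistant message dict; run_tool(call) -> result str.
        # Both are hypothetical stand-ins, not the library's actual API.
        iteration_count = 0
        while iteration_count < max_iterations:
            assistant = chat(messages)
            if not assistant.get("tool_calls"):
                # No tool calls: this message is the final answer.
                return assistant.get("content")
            messages.append(assistant)
            for call in assistant["tool_calls"]:
                # Feed every result back; on the next iteration the model
                # sees them and may choose to call more tools.
                messages.append({
                    "role": "tool",
                    "tool_call_id": call["id"],
                    "content": run_tool(call),
                })
            iteration_count += 1
        return None  # iteration cap reached without a final answer

This also explains the size drop recorded for openai_client.py below: two near-identical 38-line blocks were replaced by two comment lines each.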
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: praisonaiagents
- Version: 0.0.126
+ Version: 0.0.127
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Python: >=3.10
@@ -16,9 +16,9 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
  praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
  praisonaiagents/knowledge/knowledge.py,sha256=-di_h9HxXQfAhTMMerhK16tfw8DtUndp44TGkBOzkZs,15539
  praisonaiagents/llm/__init__.py,sha256=DtFSBjsVQj7AOTM0x5Q0bZnrbxb-t2ljom5Aid5xJEs,1547
- praisonaiagents/llm/llm.py,sha256=rewZhxoaYvVN7Hwj2lAYC3PS9RNIJkyhlyvj9W2Yh9Y,110721
+ praisonaiagents/llm/llm.py,sha256=SqLerv8PeerpX1w_esT3MByXKYETbT8KUlYvaZvwSbE,111242
  praisonaiagents/llm/model_capabilities.py,sha256=poxOxATUOi9XPTx3v6BPnXvSfikWSA9NciWQVuPU7Zg,2586
- praisonaiagents/llm/openai_client.py,sha256=0JvjCDHoH8I8kIt5vvObARkGdVaPWdTIv_FoEQ5EQPA,48973
+ praisonaiagents/llm/openai_client.py,sha256=6KANw9SNiglvfJvTcpDPZjuTKG6cThD1t-ZqgKvmZiw,45356
  praisonaiagents/mcp/__init__.py,sha256=ibbqe3_7XB7VrIcUcetkZiUZS1fTVvyMy_AqCSFG8qc,240
  praisonaiagents/mcp/mcp.py,sha256=T0G0rQotHxk9qTnG1tjQLr4c0BUSLnEqz9sIMx4F954,21598
  praisonaiagents/mcp/mcp_http_stream.py,sha256=Yh-69eIlLQS_M0bd__y7NzSjOqqX6R8Ed4eJQw6xXgg,18314
@@ -55,7 +55,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
  praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
  praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
  praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
- praisonaiagents-0.0.126.dist-info/METADATA,sha256=uLef5SSGpu_Zs4lOWdSMXrfMy78p0-Shacqup0JNpK8,1699
- praisonaiagents-0.0.126.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- praisonaiagents-0.0.126.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
- praisonaiagents-0.0.126.dist-info/RECORD,,
+ praisonaiagents-0.0.127.dist-info/METADATA,sha256=j5uEGBSJuDKv3oTVebl-NIkmea34Lm_HnHN18Jd3xyY,1699
+ praisonaiagents-0.0.127.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ praisonaiagents-0.0.127.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+ praisonaiagents-0.0.127.dist-info/RECORD,,