praisonaiagents 0.0.126__tar.gz → 0.0.127__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/PKG-INFO +1 -1
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/llm/llm.py +12 -12
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/llm/openai_client.py +4 -78
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents.egg-info/PKG-INFO +1 -1
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/pyproject.toml +1 -1
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/README.md +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/__init__.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/agent/__init__.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/agent/agent.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/agent/handoff.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/agent/image_agent.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/agents/__init__.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/agents/agents.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/agents/autoagents.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/approval.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/guardrails/__init__.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/guardrails/guardrail_result.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/guardrails/llm_guardrail.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/knowledge/__init__.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/knowledge/chunking.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/knowledge/knowledge.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/llm/__init__.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/llm/model_capabilities.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/main.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/mcp/__init__.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/mcp/mcp.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/mcp/mcp_http_stream.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/mcp/mcp_sse.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/memory/__init__.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/memory/memory.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/process/__init__.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/process/process.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/session.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/task/__init__.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/task/task.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/telemetry/__init__.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/telemetry/integration.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/telemetry/telemetry.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/tools/README.md +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/tools/__init__.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/tools/arxiv_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/tools/calculator_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/tools/csv_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/tools/duckdb_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/tools/excel_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/tools/file_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/tools/json_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/tools/newspaper_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/tools/pandas_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/tools/python_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/tools/searxng_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/tools/shell_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/tools/spider_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/tools/test.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/tools/tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/tools/train/data/generatecot.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/tools/wikipedia_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/tools/xml_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/tools/yaml_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/tools/yfinance_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents.egg-info/SOURCES.txt +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents.egg-info/dependency_links.txt +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents.egg-info/requires.txt +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents.egg-info/top_level.txt +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/setup.cfg +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/tests/test-graph-memory.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/tests/test.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/tests/test_fix_comprehensive.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/tests/test_handoff_compatibility.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/tests/test_http_stream_basic.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/tests/test_ollama_async_fix.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/tests/test_ollama_fix.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/tests/test_posthog_fixed.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/tests/test_validation_feedback.py +0 -0
{praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/llm/llm.py

@@ -772,7 +772,7 @@ class LLM:
     if formatted_tools and self._supports_streaming_tools():
         tool_calls = self._process_tool_calls_from_stream(delta, tool_calls)
 
-    response_text = response_text.strip()
+    response_text = response_text.strip() if response_text else ""
 
     # Create a mock final_response with the captured data
     final_response = {
@@ -904,7 +904,7 @@ class LLM:
 
     # Set flag to indicate Ollama was handled
     ollama_handled = True
-    final_response_text = response_text.strip()
+    final_response_text = response_text.strip() if response_text else ""
     logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
 
     # Display the response if we got one
@@ -962,7 +962,7 @@ class LLM:
     # No tool calls, we're done with this iteration
     # If we've executed tools in previous iterations, this response contains the final answer
     if iteration_count > 0:
-        final_response_text = response_text.strip()
+        final_response_text = response_text.strip() if response_text else ""
     break
 
 except Exception as e:
@@ -993,7 +993,7 @@ class LLM:
         console=console
     )
 
-    response_text = response_text.strip()
+    response_text = response_text.strip() if response_text else ""
 
     # Return reasoning content if reasoning_steps is True and we have it
     if reasoning_steps and stored_reasoning_content:
@@ -1155,7 +1155,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
     if chunk and chunk.choices and chunk.choices[0].delta.content:
         response_text += chunk.choices[0].delta.content
 
-    response_text = response_text.strip()
+    response_text = response_text.strip() if response_text else ""
     continue
 
 except json.JSONDecodeError:
@@ -1367,7 +1367,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
     if formatted_tools and self._supports_streaming_tools():
         tool_calls = self._process_tool_calls_from_stream(delta, tool_calls)
 
-    response_text = response_text.strip()
+    response_text = response_text.strip() if response_text else ""
 
     # We already have tool_calls from streaming if supported
     # No need for a second API call!
@@ -1551,7 +1551,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
     if chunk and chunk.choices and chunk.choices[0].delta.content:
         response_text += chunk.choices[0].delta.content
 
-    response_text = response_text.strip()
+    response_text = response_text.strip() if response_text else ""
 
     # After tool execution, update messages and continue the loop
     if response_text:
@@ -1808,7 +1808,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
 
     # Check if the response is just a JSON tool call
     try:
-        json_response = json.loads(response_text.strip())
+        json_response = json.loads(response_text.strip() if response_text else "{}")
         if not (('name' in json_response or 'function' in json_response) and
                 not any(word in response_text.lower() for word in ['summary', 'option', 'result', 'found'])):
             return None
@@ -2066,7 +2066,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         response_text += content
     else:
         response = litellm.completion(**completion_params)
-        response_text = response.choices[0].message.content.strip()
+        response_text = response.choices[0].message.content.strip() if response.choices[0].message.content else ""
 
     if verbose:
         display_interaction(
@@ -2077,7 +2077,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         console=console or self.console
     )
 
-    return response_text.strip()
+    return response_text.strip() if response_text else ""
 
 except Exception as error:
     display_error(f"Error in response: {str(error)}")
@@ -2154,7 +2154,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         response_text += content
     else:
         response = await litellm.acompletion(**completion_params)
-        response_text = response.choices[0].message.content.strip()
+        response_text = response.choices[0].message.content.strip() if response.choices[0].message.content else ""
 
     if verbose:
         display_interaction(
@@ -2165,7 +2165,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         console=console or self.console
    )
 
-    return response_text.strip()
+    return response_text.strip() if response_text else ""
 
 except Exception as error:
     display_error(f"Error in response_async: {str(error)}")
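All of the llm.py hunks above make the same defensive change: `.strip()` is only called when the text is truthy, so a `None` or empty response from the provider no longer raises `AttributeError`, and the JSON-parsing site falls back to `"{}"`. A minimal sketch of that pattern, assuming nothing beyond the standard library (the `safe_strip` helper is illustrative; the package inlines the conditional expression rather than defining a helper):

```python
import json
from typing import Optional


def safe_strip(text: Optional[str], default: str = "") -> str:
    """Return text.strip() when text is non-empty, otherwise a default."""
    return text.strip() if text else default


# Before 0.0.127 a None or empty value would hit None.strip() and raise
# AttributeError; with the guard it degrades to an empty string instead.
assert safe_strip(None) == ""
assert safe_strip("  hello  ") == "hello"

# The json.loads call site uses "{}" as the fallback so parsing still succeeds:
assert json.loads(safe_strip(None, default="{}")) == {}
```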
{praisonaiagents-0.0.126 → praisonaiagents-0.0.127}/praisonaiagents/llm/openai_client.py

@@ -890,45 +890,8 @@ class OpenAIClient:
         "content": results_str
     })
 
-    #
-
-    for tool_call in tool_calls:
-        # Handle both ToolCall dataclass and OpenAI object
-        if isinstance(tool_call, ToolCall):
-            function_name = tool_call.function["name"]
-            arguments = json.loads(tool_call.function["arguments"])
-        else:
-            function_name = tool_call.function.name
-            arguments = json.loads(tool_call.function.arguments)
-
-        # For sequential thinking tool, check if nextThoughtNeeded is True
-        if function_name == "sequentialthinking" and arguments.get("nextThoughtNeeded", False):
-            should_continue = True
-            break
-
-    if not should_continue:
-        # Get final response after tool calls
-        if stream:
-            final_response = self.process_stream_response(
-                messages=messages,
-                model=model,
-                temperature=temperature,
-                tools=formatted_tools,
-                start_time=start_time,
-                console=console,
-                display_fn=display_fn,
-                reasoning_steps=reasoning_steps,
-                **kwargs
-            )
-        else:
-            final_response = self.create_completion(
-                messages=messages,
-                model=model,
-                temperature=temperature,
-                stream=False,
-                **kwargs
-            )
-        break
+    # Continue the loop to allow more tool calls
+    # The model will see tool results and can make additional tool calls
 
     iteration_count += 1
 else:
@@ -1067,45 +1030,8 @@ class OpenAIClient:
         "content": results_str
     })
 
-    #
-
-    for tool_call in tool_calls:
-        # Handle both ToolCall dataclass and OpenAI object
-        if isinstance(tool_call, ToolCall):
-            function_name = tool_call.function["name"]
-            arguments = json.loads(tool_call.function["arguments"])
-        else:
-            function_name = tool_call.function.name
-            arguments = json.loads(tool_call.function.arguments)
-
-        # For sequential thinking tool, check if nextThoughtNeeded is True
-        if function_name == "sequentialthinking" and arguments.get("nextThoughtNeeded", False):
-            should_continue = True
-            break
-
-    if not should_continue:
-        # Get final response after tool calls
-        if stream:
-            final_response = await self.process_stream_response_async(
-                messages=messages,
-                model=model,
-                temperature=temperature,
-                tools=formatted_tools,
-                start_time=start_time,
-                console=console,
-                display_fn=display_fn,
-                reasoning_steps=reasoning_steps,
-                **kwargs
-            )
-        else:
-            final_response = await self.acreate_completion(
-                messages=messages,
-                model=model,
-                temperature=temperature,
-                stream=False,
-                **kwargs
-            )
-        break
+    # Continue the loop to allow more tool calls
+    # The model will see tool results and can make additional tool calls
 
     iteration_count += 1
 else:
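The two openai_client.py hunks drop the special case that broke out of the tool-call loop unless a `sequentialthinking` call asked for another thought; after the change, both the sync and async loops simply append the tool results and iterate again until the model responds without tool calls (or the iteration limit is reached). A rough sketch of that control flow, with hypothetical `call_model` and `execute_tool` stand-ins for the client's real completion and tool-execution methods:

```python
# Rough sketch of the tool-call loop after 0.0.127 (sync flavour).
# `call_model` and `execute_tool` are hypothetical stand-ins; the real
# client uses create_completion / process_stream_response and richer state.
from typing import Callable, Dict, List


def run_tool_loop(
    call_model: Callable[[List[Dict]], Dict],
    execute_tool: Callable[[Dict], str],
    messages: List[Dict],
    max_iterations: int = 10,
) -> str:
    iteration_count = 0
    while iteration_count < max_iterations:
        response = call_model(messages)
        tool_calls = response.get("tool_calls") or []
        if not tool_calls:
            # No tool calls: this response is the final answer.
            return response.get("content") or ""
        for tool_call in tool_calls:
            result = execute_tool(tool_call)
            messages.append({
                "role": "tool",
                "tool_call_id": tool_call.get("id"),
                "content": str(result),
            })
        # Continue the loop so the model can issue further tool calls after
        # seeing the tool results (no per-tool special-casing any more).
        iteration_count += 1
    return ""
```

This only illustrates the loop-continuation behaviour; streaming, display, and reasoning-step handling in the actual client are omitted.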