praisonaiagents 0.0.141__py3-none-any.whl → 0.0.143__py3-none-any.whl
This diff reflects the changes between two publicly released versions of the package, as they appear in their public registries. It is provided for informational purposes only.
- praisonaiagents/__init__.py +12 -3
- praisonaiagents/agent/agent.py +32 -5
- praisonaiagents/agent/image_agent.py +19 -4
- praisonaiagents/knowledge/knowledge.py +360 -1
- praisonaiagents/llm/llm.py +45 -8
- praisonaiagents/llm/openai_client.py +144 -0
- praisonaiagents/mcp/mcp.py +54 -14
- praisonaiagents/memory/memory.py +390 -12
- praisonaiagents/telemetry/__init__.py +9 -2
- praisonaiagents/telemetry/telemetry.py +255 -25
- praisonaiagents/tools/__init__.py +17 -1
- praisonaiagents/tools/mongodb_tools.py +610 -0
- {praisonaiagents-0.0.141.dist-info → praisonaiagents-0.0.143.dist-info}/METADATA +5 -1
- {praisonaiagents-0.0.141.dist-info → praisonaiagents-0.0.143.dist-info}/RECORD +16 -15
- {praisonaiagents-0.0.141.dist-info → praisonaiagents-0.0.143.dist-info}/WHEEL +0 -0
- {praisonaiagents-0.0.141.dist-info → praisonaiagents-0.0.143.dist-info}/top_level.txt +0 -0
@@ -1049,6 +1049,150 @@ class OpenAIClient:
                     break
 
         return final_response
+
+    def chat_completion_with_tools_stream(
+        self,
+        messages: List[Dict[str, Any]],
+        model: str = "gpt-4o",
+        temperature: float = 0.7,
+        tools: Optional[List[Any]] = None,
+        execute_tool_fn: Optional[Callable] = None,
+        reasoning_steps: bool = False,
+        verbose: bool = True,
+        max_iterations: int = 10,
+        **kwargs
+    ):
+        """
+        Create a streaming chat completion with tool support.
+
+        This method yields chunks of the response as they are generated,
+        enabling real-time streaming to the user.
+
+        Args:
+            messages: List of message dictionaries
+            model: Model to use
+            temperature: Temperature for generation
+            tools: List of tools (can be callables, dicts, or strings)
+            execute_tool_fn: Function to execute tools
+            reasoning_steps: Whether to show reasoning
+            verbose: Whether to show verbose output
+            max_iterations: Maximum tool calling iterations
+            **kwargs: Additional API parameters
+
+        Yields:
+            String chunks of the response as they are generated
+        """
+        # Format tools for OpenAI API
+        formatted_tools = self.format_tools(tools)
+
+        # Continue tool execution loop until no more tool calls are needed
+        iteration_count = 0
+
+        while iteration_count < max_iterations:
+            try:
+                # Create streaming response
+                response_stream = self._sync_client.chat.completions.create(
+                    model=model,
+                    messages=messages,
+                    temperature=temperature,
+                    tools=formatted_tools if formatted_tools else None,
+                    stream=True,
+                    **kwargs
+                )
+
+                full_response_text = ""
+                reasoning_content = ""
+                chunks = []
+
+                # Stream the response chunk by chunk
+                for chunk in response_stream:
+                    chunks.append(chunk)
+                    if chunk.choices and chunk.choices[0].delta.content:
+                        content = chunk.choices[0].delta.content
+                        full_response_text += content
+                        yield content
+
+                    # Handle reasoning content if enabled
+                    if reasoning_steps and chunk.choices and hasattr(chunk.choices[0].delta, "reasoning_content"):
+                        rc = chunk.choices[0].delta.reasoning_content
+                        if rc:
+                            reasoning_content += rc
+                            yield f"[Reasoning: {rc}]"
+
+                # Process the complete response to check for tool calls
+                final_response = process_stream_chunks(chunks)
+
+                if not final_response:
+                    return
+
+                # Check for tool calls
+                tool_calls = getattr(final_response.choices[0].message, 'tool_calls', None)
+
+                if tool_calls and execute_tool_fn:
+                    # Convert ToolCall dataclass objects to dict for JSON serialization
+                    serializable_tool_calls = []
+                    for tc in tool_calls:
+                        if isinstance(tc, ToolCall):
+                            # Convert dataclass to dict
+                            serializable_tool_calls.append({
+                                "id": tc.id,
+                                "type": tc.type,
+                                "function": tc.function
+                            })
+                        else:
+                            # Already an OpenAI object, keep as is
+                            serializable_tool_calls.append(tc)
+
+                    messages.append({
+                        "role": "assistant",
+                        "content": final_response.choices[0].message.content,
+                        "tool_calls": serializable_tool_calls
+                    })
+
+                    for tool_call in tool_calls:
+                        # Handle both ToolCall dataclass and OpenAI object
+                        try:
+                            if isinstance(tool_call, ToolCall):
+                                function_name = tool_call.function["name"]
+                                arguments = json.loads(tool_call.function["arguments"])
+                            else:
+                                function_name = tool_call.function.name
+                                arguments = json.loads(tool_call.function.arguments)
+                        except json.JSONDecodeError as e:
+                            if verbose:
+                                yield f"\n[Error parsing arguments for {function_name if 'function_name' in locals() else 'unknown function'}: {str(e)}]"
+                            continue
+
+                        if verbose:
+                            yield f"\n[Calling function: {function_name}]"
+
+                        # Execute the tool with error handling
+                        try:
+                            tool_result = execute_tool_fn(function_name, arguments)
+                            results_str = json.dumps(tool_result) if tool_result else "Function returned an empty output"
+                        except Exception as e:
+                            results_str = f"Error executing function: {str(e)}"
+                            if verbose:
+                                yield f"\n[Function error: {str(e)}]"
+
+                        if verbose:
+                            yield f"\n[Function result: {results_str}]"
+
+                        messages.append({
+                            "role": "tool",
+                            "tool_call_id": tool_call.id if hasattr(tool_call, 'id') else tool_call['id'],
+                            "content": results_str
+                        })
+
+                    # Continue the loop to allow more tool calls
+                    iteration_count += 1
+                else:
+                    # No tool calls, we're done
+                    break
+
+            except Exception as e:
+                yield f"Error: {str(e)}"
+                break
 
     def parse_structured_output(
        self,
praisonaiagents/mcp/mcp.py
CHANGED
@@ -25,6 +25,8 @@ class MCPToolRunner(threading.Thread):
         self.initialized = threading.Event()
         self.tools = []
         self.timeout = timeout
+        self._tool_timings = {}
+        self._timings_lock = threading.Lock()
         self.start()
 
     def run(self):
@@ -75,25 +77,63 @@ class MCPToolRunner(threading.Thread):
 
     def call_tool(self, tool_name, arguments):
         """Call an MCP tool and wait for the result."""
+        # Import telemetry here to avoid circular imports
+        try:
+            from ..telemetry.telemetry import get_telemetry
+            telemetry = get_telemetry()
+        except (ImportError, AttributeError):
+            telemetry = None
+
+        # Check initialization first (without timing)
         if not self.initialized.is_set():
             self.initialized.wait(timeout=self.timeout)
             if not self.initialized.is_set():
+                # Track initialization timeout failure
+                if telemetry:
+                    telemetry.track_tool_usage(tool_name, success=False, execution_time=0)
                 return f"Error: MCP initialization timed out after {self.timeout} seconds"
 
-        # … (14 removed lines; all but the leading "#" are truncated in this diff view)
+        # Start timing after initialization check
+        start_time = time.time()
+        is_success = False
+        try:
+            # Put request in queue
+            self.queue.put((tool_name, arguments))
+
+            # Wait for result
+            success, result = self.result_queue.get()
+            if not success:
+                return f"Error: {result}"
+
+            # Process result
+            if hasattr(result, 'content') and result.content:
+                if hasattr(result.content[0], 'text'):
+                    processed_result = result.content[0].text
+                else:
+                    processed_result = str(result.content[0])
+            else:
+                processed_result = str(result)
+
+            is_success = True
+            return processed_result
+
+        except Exception as e:
+            return f"Error: {str(e)}"
+        finally:
+            # Track timing regardless of success/failure
+            end_time = time.time()
+            execution_time = end_time - start_time
+
+            # Log timing information for debugging
+            logging.debug(f"Tool '{tool_name}' execution time: {execution_time:.3f} seconds")
+
+            # Store timing in thread-safe manner
+            with self._timings_lock:
+                self._tool_timings[tool_name] = execution_time
+
+            # Track tool usage with timing information
+            if telemetry:
+                telemetry.track_tool_usage(tool_name, success=is_success, execution_time=execution_time)
 
     def shutdown(self):
         """Signal the thread to shut down."""