praisonaiagents 0.0.113__py3-none-any.whl → 0.0.115__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package versions exactly as they appear in the public registry.
- praisonaiagents/agent/agent.py +164 -413
- praisonaiagents/llm/__init__.py +26 -1
- praisonaiagents/llm/openai_client.py +1219 -0
- {praisonaiagents-0.0.113.dist-info → praisonaiagents-0.0.115.dist-info}/METADATA +1 -1
- {praisonaiagents-0.0.113.dist-info → praisonaiagents-0.0.115.dist-info}/RECORD +7 -6
- {praisonaiagents-0.0.113.dist-info → praisonaiagents-0.0.115.dist-info}/WHEEL +0 -0
- {praisonaiagents-0.0.113.dist-info → praisonaiagents-0.0.115.dist-info}/top_level.txt +0 -0
praisonaiagents/agent/agent.py
CHANGED
@@ -1,12 +1,23 @@
 import os
 import time
 import json
+import copy
 import logging
 import asyncio
 from typing import List, Optional, Any, Dict, Union, Literal, TYPE_CHECKING, Callable, Tuple
 from rich.console import Console
 from rich.live import Live
-from
+from ..llm import (
+    get_openai_client,
+    ChatCompletionMessage,
+    Choice,
+    CompletionTokensDetails,
+    PromptTokensDetails,
+    CompletionUsage,
+    ChatCompletion,
+    ToolCall,
+    process_stream_chunks
+)
 from ..main import (
     display_error,
     display_tool_call,
@@ -15,13 +26,11 @@ from ..main import (
     display_generating,
     display_self_reflection,
     ReflectionOutput,
-    client,
     adisplay_instruction,
     approval_callback
 )
 import inspect
 import uuid
-from dataclasses import dataclass

 # Global variables for API server
 _server_started = {} # Dict of port -> started boolean
@@ -35,175 +44,6 @@ if TYPE_CHECKING:
     from ..main import TaskOutput
     from ..handoff import Handoff

-@dataclass
-class ChatCompletionMessage:
-    content: str
-    role: str = "assistant"
-    refusal: Optional[str] = None
-    audio: Optional[str] = None
-    function_call: Optional[dict] = None
-    tool_calls: Optional[List] = None
-    reasoning_content: Optional[str] = None
-
-@dataclass
-class Choice:
-    finish_reason: Optional[str]
-    index: int
-    message: ChatCompletionMessage
-    logprobs: Optional[dict] = None
-
-@dataclass
-class CompletionTokensDetails:
-    accepted_prediction_tokens: Optional[int] = None
-    audio_tokens: Optional[int] = None
-    reasoning_tokens: Optional[int] = None
-    rejected_prediction_tokens: Optional[int] = None
-
-@dataclass
-class PromptTokensDetails:
-    audio_tokens: Optional[int] = None
-    cached_tokens: int = 0
-
-@dataclass
-class CompletionUsage:
-    completion_tokens: int = 0
-    prompt_tokens: int = 0
-    total_tokens: int = 0
-    completion_tokens_details: Optional[CompletionTokensDetails] = None
-    prompt_tokens_details: Optional[PromptTokensDetails] = None
-    prompt_cache_hit_tokens: int = 0
-    prompt_cache_miss_tokens: int = 0
-
-@dataclass
-class ChatCompletion:
-    id: str
-    choices: List[Choice]
-    created: int
-    model: str
-    object: str = "chat.completion"
-    system_fingerprint: Optional[str] = None
-    service_tier: Optional[str] = None
-    usage: Optional[CompletionUsage] = None
-
-def process_stream_chunks(chunks):
-    """Process streaming chunks into combined response"""
-    if not chunks:
-        return None
-
-    try:
-        first_chunk = chunks[0]
-        last_chunk = chunks[-1]
-
-        # Basic metadata
-        id = getattr(first_chunk, "id", None)
-        created = getattr(first_chunk, "created", None)
-        model = getattr(first_chunk, "model", None)
-        system_fingerprint = getattr(first_chunk, "system_fingerprint", None)
-
-        # Track usage
-        completion_tokens = 0
-        prompt_tokens = 0
-
-        content_list = []
-        reasoning_list = []
-        tool_calls = []
-        current_tool_call = None
-
-        # First pass: Get initial tool call data
-        for chunk in chunks:
-            if not hasattr(chunk, "choices") or not chunk.choices:
-                continue
-
-            delta = getattr(chunk.choices[0], "delta", None)
-            if not delta:
-                continue
-
-            # Handle content and reasoning
-            if hasattr(delta, "content") and delta.content:
-                content_list.append(delta.content)
-            if hasattr(delta, "reasoning_content") and delta.reasoning_content:
-                reasoning_list.append(delta.reasoning_content)
-
-            # Handle tool calls
-            if hasattr(delta, "tool_calls") and delta.tool_calls:
-                for tool_call_delta in delta.tool_calls:
-                    if tool_call_delta.index is not None and tool_call_delta.id:
-                        # Found the initial tool call
-                        current_tool_call = {
-                            "id": tool_call_delta.id,
-                            "type": "function",
-                            "function": {
-                                "name": tool_call_delta.function.name,
-                                "arguments": ""
-                            }
-                        }
-                        while len(tool_calls) <= tool_call_delta.index:
-                            tool_calls.append(None)
-                        tool_calls[tool_call_delta.index] = current_tool_call
-                        current_tool_call = tool_calls[tool_call_delta.index]
-                    elif current_tool_call is not None and hasattr(tool_call_delta.function, "arguments"):
-                        if tool_call_delta.function.arguments:
-                            current_tool_call["function"]["arguments"] += tool_call_delta.function.arguments
-
-        # Remove any None values and empty tool calls
-        tool_calls = [tc for tc in tool_calls if tc and tc["id"] and tc["function"]["name"]]
-
-        combined_content = "".join(content_list) if content_list else ""
-        combined_reasoning = "".join(reasoning_list) if reasoning_list else None
-        finish_reason = getattr(last_chunk.choices[0], "finish_reason", None) if hasattr(last_chunk, "choices") and last_chunk.choices else None
-
-        # Create ToolCall objects
-        processed_tool_calls = []
-        if tool_calls:
-            try:
-                from openai.types.chat import ChatCompletionMessageToolCall
-                for tc in tool_calls:
-                    tool_call = ChatCompletionMessageToolCall(
-                        id=tc["id"],
-                        type=tc["type"],
-                        function={
-                            "name": tc["function"]["name"],
-                            "arguments": tc["function"]["arguments"]
-                        }
-                    )
-                    processed_tool_calls.append(tool_call)
-            except Exception as e:
-                print(f"Error processing tool call: {e}")
-
-        message = ChatCompletionMessage(
-            content=combined_content,
-            role="assistant",
-            reasoning_content=combined_reasoning,
-            tool_calls=processed_tool_calls if processed_tool_calls else None
-        )
-
-        choice = Choice(
-            finish_reason=finish_reason or "tool_calls" if processed_tool_calls else None,
-            index=0,
-            message=message
-        )
-
-        usage = CompletionUsage(
-            completion_tokens=completion_tokens,
-            prompt_tokens=prompt_tokens,
-            total_tokens=completion_tokens + prompt_tokens,
-            completion_tokens_details=CompletionTokensDetails(),
-            prompt_tokens_details=PromptTokensDetails()
-        )
-
-        return ChatCompletion(
-            id=id,
-            choices=[choice],
-            created=created,
-            model=model,
-            system_fingerprint=system_fingerprint,
-            usage=usage
-        )
-
-    except Exception as e:
-        print(f"Error processing chunks: {e}")
-        return None
-
 class Agent:
     def _generate_tool_definition(self, function_name):
         """
@@ -513,6 +353,9 @@ class Agent:
         self.instructions = instructions
         # Check for model name in environment variable if not provided
         self._using_custom_llm = False
+
+        # Initialize OpenAI client for direct API calls
+        self._openai_client = get_openai_client(api_key=api_key, base_url=base_url)

         # If base_url is provided, always create a custom LLM instance
         if base_url:
@@ -831,6 +674,110 @@ Your Goal: {self.goal}

         return current_response

+    def _build_messages(self, prompt, temperature=0.2, output_json=None, output_pydantic=None):
+        """Build messages list for chat completion.
+
+        Args:
+            prompt: The user prompt (str or list)
+            temperature: Temperature for the chat
+            output_json: Optional Pydantic model for JSON output
+            output_pydantic: Optional Pydantic model for JSON output (alias)
+
+        Returns:
+            tuple: (messages list, original prompt)
+        """
+        # Build system prompt if enabled
+        system_prompt = None
+        if self.use_system_prompt:
+            system_prompt = f"""{self.backstory}\n
+Your Role: {self.role}\n
+Your Goal: {self.goal}
+            """
+
+        # Use openai_client's build_messages method
+        messages, original_prompt = self._openai_client.build_messages(
+            prompt=prompt,
+            system_prompt=system_prompt,
+            chat_history=self.chat_history,
+            output_json=output_json,
+            output_pydantic=output_pydantic
+        )
+
+        return messages, original_prompt
+
+    def _format_tools_for_completion(self, tools=None):
+        """Format tools for OpenAI completion API.
+
+        Supports:
+        - Pre-formatted OpenAI tools (dicts with type='function')
+        - Lists of pre-formatted tools
+        - Callable functions
+        - String function names
+        - Objects with to_openai_tool() method
+
+        Args:
+            tools: List of tools in various formats or None to use self.tools
+
+        Returns:
+            List of formatted tools or empty list
+        """
+        if tools is None:
+            tools = self.tools
+
+        if not tools:
+            return []
+
+        formatted_tools = []
+        for tool in tools:
+            # Handle pre-formatted OpenAI tools
+            if isinstance(tool, dict) and tool.get('type') == 'function':
+                # Validate nested dictionary structure before accessing
+                if 'function' in tool and isinstance(tool['function'], dict) and 'name' in tool['function']:
+                    formatted_tools.append(tool)
+                else:
+                    logging.warning(f"Skipping malformed OpenAI tool: missing function or name")
+            # Handle lists of tools
+            elif isinstance(tool, list):
+                for subtool in tool:
+                    if isinstance(subtool, dict) and subtool.get('type') == 'function':
+                        # Validate nested dictionary structure before accessing
+                        if 'function' in subtool and isinstance(subtool['function'], dict) and 'name' in subtool['function']:
+                            formatted_tools.append(subtool)
+                        else:
+                            logging.warning(f"Skipping malformed OpenAI tool in list: missing function or name")
+            # Handle string tool names
+            elif isinstance(tool, str):
+                tool_def = self._generate_tool_definition(tool)
+                if tool_def:
+                    formatted_tools.append(tool_def)
+                else:
+                    logging.warning(f"Could not generate definition for tool: {tool}")
+            # Handle objects with to_openai_tool method (MCP tools)
+            elif hasattr(tool, "to_openai_tool"):
+                openai_tools = tool.to_openai_tool()
+                # MCP tools can return either a single tool or a list of tools
+                if isinstance(openai_tools, list):
+                    formatted_tools.extend(openai_tools)
+                elif openai_tools is not None:
+                    formatted_tools.append(openai_tools)
+            # Handle callable functions
+            elif callable(tool):
+                tool_def = self._generate_tool_definition(tool.__name__)
+                if tool_def:
+                    formatted_tools.append(tool_def)
+            else:
+                logging.warning(f"Tool {tool} not recognized")
+
+        # Validate JSON serialization before returning
+        if formatted_tools:
+            try:
+                json.dumps(formatted_tools)  # Validate serialization
+            except (TypeError, ValueError) as e:
+                logging.error(f"Tools are not JSON serializable: {e}")
+                return []
+
+        return formatted_tools
+
     def generate_task(self) -> 'Task':
         """Generate a Task object from the agent's instructions"""
         from ..task.task import Task
@@ -996,75 +943,23 @@ Your Goal: {self.goal}

     def _process_stream_response(self, messages, temperature, start_time, formatted_tools=None, reasoning_steps=False):
         """Process streaming response and return final response"""
-
-
-
-
-
-
-
-
-
-
-            full_response_text = ""
-            reasoning_content = ""
-            chunks = []
-
-            # Create Live display with proper configuration
-            with Live(
-                display_generating("", start_time),
-                console=self.console,
-                refresh_per_second=4,
-                transient=True,
-                vertical_overflow="ellipsis",
-                auto_refresh=True
-            ) as live:
-                for chunk in response_stream:
-                    chunks.append(chunk)
-                    if chunk.choices[0].delta.content:
-                        full_response_text += chunk.choices[0].delta.content
-                        live.update(display_generating(full_response_text, start_time))
-
-                    # Update live display with reasoning content if enabled
-                    if reasoning_steps and hasattr(chunk.choices[0].delta, "reasoning_content"):
-                        rc = chunk.choices[0].delta.reasoning_content
-                        if rc:
-                            reasoning_content += rc
-                            live.update(display_generating(f"{full_response_text}\n[Reasoning: {reasoning_content}]", start_time))
-
-            # Clear the last generating display with a blank line
-            self.console.print()
-            final_response = process_stream_chunks(chunks)
-            return final_response
-
-        except Exception as e:
-            display_error(f"Error in stream processing: {e}")
-            return None
+        return self._openai_client.process_stream_response(
+            messages=messages,
+            model=self.llm,
+            temperature=temperature,
+            tools=formatted_tools,
+            start_time=start_time,
+            console=self.console,
+            display_fn=display_generating,
+            reasoning_steps=reasoning_steps
+        )

     def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, reasoning_steps=False):
         start_time = time.time()
         logging.debug(f"{self.name} sending messages to LLM: {messages}")

-
-
-        tools = self.tools
-        if tools:
-            for tool in tools:
-                if isinstance(tool, str):
-                    # Generate tool definition for string tool names
-                    tool_def = self._generate_tool_definition(tool)
-                    if tool_def:
-                        formatted_tools.append(tool_def)
-                    else:
-                        logging.warning(f"Could not generate definition for tool: {tool}")
-                elif isinstance(tool, dict):
-                    formatted_tools.append(tool)
-                elif hasattr(tool, "to_openai_tool"):
-                    formatted_tools.append(tool.to_openai_tool())
-                elif callable(tool):
-                    formatted_tools.append(self._generate_tool_definition(tool.__name__))
-                else:
-                    logging.warning(f"Tool {tool} not recognized")
+        # Use the new _format_tools_for_completion helper method
+        formatted_tools = self._format_tools_for_completion(tools)

         try:
             # Use the custom LLM instance if available
@@ -1106,93 +1001,27 @@ Your Goal: {self.goal}
                     reasoning_steps=reasoning_steps
                 )
             else:
-                # Use the standard OpenAI client approach
-
-
-
+                # Use the standard OpenAI client approach with tool support
+                def custom_display_fn(text, start_time):
+                    if self.verbose:
+                        return display_generating(text, start_time)
+                    return ""

-
-
-
-
-
-
-
-
-
-
-                    else
-
-
-
-
-                            temperature=temperature,
-                            tools=formatted_tools if formatted_tools else None,
-                            stream=False
-                        )
-
-                    tool_calls = getattr(final_response.choices[0].message, 'tool_calls', None)
-
-                    if tool_calls:
-                        messages.append({
-                            "role": "assistant",
-                            "content": final_response.choices[0].message.content,
-                            "tool_calls": tool_calls
-                        })
-
-                        for tool_call in tool_calls:
-                            function_name = tool_call.function.name
-                            arguments = json.loads(tool_call.function.arguments)
-
-                            if self.verbose:
-                                display_tool_call(f"Agent {self.name} is calling function '{function_name}' with arguments: {arguments}")
-
-                            tool_result = self.execute_tool(function_name, arguments)
-                            results_str = json.dumps(tool_result) if tool_result else "Function returned an empty output"
-
-                            if self.verbose:
-                                display_tool_call(f"Function '{function_name}' returned: {results_str}")
-
-                            messages.append({
-                                "role": "tool",
-                                "tool_call_id": tool_call.id,
-                                "content": results_str
-                            })
-
-                        # Check if we should continue (for tools like sequential thinking)
-                        should_continue = False
-                        for tool_call in tool_calls:
-                            function_name = tool_call.function.name
-                            arguments = json.loads(tool_call.function.arguments)
-
-                            # For sequential thinking tool, check if nextThoughtNeeded is True
-                            if function_name == "sequentialthinking" and arguments.get("nextThoughtNeeded", False):
-                                should_continue = True
-                                break
-
-                        if not should_continue:
-                            # Get final response after tool calls
-                            if stream:
-                                final_response = self._process_stream_response(
-                                    messages,
-                                    temperature,
-                                    start_time,
-                                    formatted_tools=formatted_tools if formatted_tools else None,
-                                    reasoning_steps=reasoning_steps
-                                )
-                            else:
-                                final_response = client.chat.completions.create(
-                                    model=self.llm,
-                                    messages=messages,
-                                    temperature=temperature,
-                                    stream=False
-                                )
-                            break
-
-                        iteration_count += 1
-                    else:
-                        # No tool calls, we're done
-                        break
+                # Note: openai_client expects tools in various formats and will format them internally
+                # But since we already have formatted_tools, we can pass them directly
+                final_response = self._openai_client.chat_completion_with_tools(
+                    messages=messages,
+                    model=self.llm,
+                    temperature=temperature,
+                    tools=formatted_tools,  # Already formatted for OpenAI
+                    execute_tool_fn=self.execute_tool,
+                    stream=stream,
+                    console=self.console if self.verbose else None,
+                    display_fn=display_generating if stream and self.verbose else None,
+                    reasoning_steps=reasoning_steps,
+                    verbose=self.verbose,
+                    max_iterations=10
+                )

         return final_response

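
The removed block above is essentially the loop that chat_completion_with_tools now encapsulates inside the shared client. A condensed sketch of that loop against the raw OpenAI SDK, reconstructed from the removed code (the max_iterations bound mirrors the new call's parameter; streaming, display, and error handling are omitted):

import json
from openai import OpenAI

client = OpenAI()

def run_tool_loop(messages, tools, execute_tool_fn, model="gpt-4o-mini", max_iterations=10):
    """Condensed version of the tool-call loop the removed code implemented inline."""
    response = None
    for _ in range(max_iterations):
        response = client.chat.completions.create(
            model=model, messages=messages, tools=tools or None, temperature=0.2
        )
        tool_calls = response.choices[0].message.tool_calls
        if not tool_calls:
            break  # no tool calls, we're done
        # Echo the assistant turn that requested the tools, then append each result.
        messages.append({
            "role": "assistant",
            "content": response.choices[0].message.content,
            "tool_calls": tool_calls,
        })
        for tool_call in tool_calls:
            result = execute_tool_fn(tool_call.function.name,
                                     json.loads(tool_call.function.arguments))
            messages.append({
                "role": "tool",
                "tool_call_id": tool_call.id,
                "content": json.dumps(result) if result else "Function returned an empty output",
            })
    return response
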
@@ -1297,40 +1126,8 @@ Your Goal: {self.goal}
                 display_error(f"Error in LLM chat: {e}")
                 return None
         else:
-
-
-Your Role: {self.role}\n
-Your Goal: {self.goal}
-            """
-            if output_json:
-                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
-            elif output_pydantic:
-                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
-            else:
-                system_prompt = None
-
-            messages = []
-            if system_prompt:
-                messages.append({"role": "system", "content": system_prompt})
-            messages.extend(self.chat_history)
-
-            # Modify prompt if output_json or output_pydantic is specified
-            original_prompt = prompt
-            if output_json or output_pydantic:
-                if isinstance(prompt, str):
-                    prompt += "\nReturn ONLY a valid JSON object. No other text or explanation."
-                elif isinstance(prompt, list):
-                    # For multimodal prompts, append to the text content
-                    for item in prompt:
-                        if item["type"] == "text":
-                            item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
-                            break
-
-            if isinstance(prompt, list):
-                # If we receive a multimodal prompt list, place it directly in the user message
-                messages.append({"role": "user", "content": prompt})
-            else:
-                messages.append({"role": "user", "content": prompt})
+            # Use the new _build_messages helper method
+            messages, original_prompt = self._build_messages(prompt, temperature, output_json, output_pydantic)

             final_response_text = None
             reflection_count = 0
@@ -1405,7 +1202,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 messages.append({"role": "user", "content": reflection_prompt})

                 try:
-                    reflection_response =
+                    reflection_response = self._openai_client.sync_client.beta.chat.completions.parse(
                         model=self.reflect_llm if self.reflect_llm else self.llm,
                         messages=messages,
                         temperature=temperature,
@@ -1566,38 +1363,8 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             return None

         # For OpenAI client
-
-
-Your Role: {self.role}\n
-Your Goal: {self.goal}
-        """
-        if output_json:
-            system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
-        elif output_pydantic:
-            system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
-        else:
-            system_prompt = None
-
-        messages = []
-        if system_prompt:
-            messages.append({"role": "system", "content": system_prompt})
-        messages.extend(self.chat_history)
-
-        # Modify prompt if output_json or output_pydantic is specified
-        original_prompt = prompt
-        if output_json or output_pydantic:
-            if isinstance(prompt, str):
-                prompt += "\nReturn ONLY a valid JSON object. No other text or explanation."
-            elif isinstance(prompt, list):
-                for item in prompt:
-                    if item["type"] == "text":
-                        item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
-                        break
-
-        if isinstance(prompt, list):
-            messages.append({"role": "user", "content": prompt})
-        else:
-            messages.append({"role": "user", "content": prompt})
+        # Use the new _build_messages helper method
+        messages, original_prompt = self._build_messages(prompt, temperature, output_json, output_pydantic)

         reflection_count = 0
         start_time = time.time()
@@ -1619,27 +1386,12 @@ Your Goal: {self.goal}
                 agent_tools=agent_tools
             )

-        #
-        formatted_tools =
-        if tools:
-            for tool in tools:
-                if isinstance(tool, str):
-                    tool_def = self._generate_tool_definition(tool)
-                    if tool_def:
-                        formatted_tools.append(tool_def)
-                elif isinstance(tool, dict):
-                    formatted_tools.append(tool)
-                elif hasattr(tool, "to_openai_tool"):
-                    formatted_tools.append(tool.to_openai_tool())
-                elif callable(tool):
-                    formatted_tools.append(self._generate_tool_definition(tool.__name__))
-
-        # Create async OpenAI client
-        async_client = AsyncOpenAI()
+        # Use the new _format_tools_for_completion helper method
+        formatted_tools = self._format_tools_for_completion(tools)

         # Make the API call based on the type of request
         if tools:
-            response = await async_client.chat.completions.create(
+            response = await self._openai_client.async_client.chat.completions.create(
                 model=self.llm,
                 messages=messages,
                 temperature=temperature,
@@ -1651,7 +1403,7 @@ Your Goal: {self.goal}
                 logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
                 return result
         elif output_json or output_pydantic:
-            response = await async_client.chat.completions.create(
+            response = await self._openai_client.async_client.chat.completions.create(
                 model=self.llm,
                 messages=messages,
                 temperature=temperature,
@@ -1663,7 +1415,7 @@ Your Goal: {self.goal}
             logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
             return response.choices[0].message.content
         else:
-            response = await async_client.chat.completions.create(
+            response = await self._openai_client.async_client.chat.completions.create(
                 model=self.llm,
                 messages=messages,
                 temperature=temperature
@@ -1690,7 +1442,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 ]

                 try:
-                    reflection_response = await async_client.beta.chat.completions.parse(
+                    reflection_response = await self._openai_client.async_client.beta.chat.completions.parse(
                         model=self.reflect_llm if self.reflect_llm else self.llm,
                         messages=reflection_messages,
                         temperature=temperature,
@@ -1720,7 +1472,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         {"role": "user", "content": "Now regenerate your response using the reflection you made"}
                     ]

-                    new_response = await async_client.chat.completions.create(
+                    new_response = await self._openai_client.async_client.chat.completions.create(
                         model=self.llm,
                         messages=regenerate_messages,
                         temperature=temperature
@@ -1796,8 +1548,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 {"role": "user", "content": formatted_results + "\nPlease process these results and provide a final response."}
             ]
             try:
-
-                final_response = await async_client.chat.completions.create(
+                final_response = await self._openai_client.async_client.chat.completions.create(
                     model=self.llm,
                     messages=messages,
                     temperature=0.2,