praisonaiagents 0.0.112__py3-none-any.whl → 0.0.114__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- praisonaiagents/agent/agent.py +187 -124
- praisonaiagents/llm/__init__.py +2 -1
- praisonaiagents/llm/llm.py +100 -124
- praisonaiagents/llm/openai_client.py +260 -0
- {praisonaiagents-0.0.112.dist-info → praisonaiagents-0.0.114.dist-info}/METADATA +1 -1
- {praisonaiagents-0.0.112.dist-info → praisonaiagents-0.0.114.dist-info}/RECORD +8 -7
- {praisonaiagents-0.0.112.dist-info → praisonaiagents-0.0.114.dist-info}/WHEEL +0 -0
- {praisonaiagents-0.0.112.dist-info → praisonaiagents-0.0.114.dist-info}/top_level.txt +0 -0
praisonaiagents/agent/agent.py
CHANGED
@@ -1,12 +1,13 @@
 import os
 import time
 import json
+import copy
 import logging
 import asyncio
 from typing import List, Optional, Any, Dict, Union, Literal, TYPE_CHECKING, Callable, Tuple
 from rich.console import Console
 from rich.live import Live
-from
+from ..llm import get_openai_client
 from ..main import (
     display_error,
     display_tool_call,
@@ -15,7 +16,6 @@ from ..main import (
     display_generating,
     display_self_reflection,
     ReflectionOutput,
-    client,
     adisplay_instruction,
     approval_callback
 )
@@ -85,6 +85,13 @@ class ChatCompletion:
     service_tier: Optional[str] = None
     usage: Optional[CompletionUsage] = None

+@dataclass
+class ToolCall:
+    """Tool call representation compatible with OpenAI format"""
+    id: str
+    type: str
+    function: Dict[str, Any]
+
 def process_stream_chunks(chunks):
     """Process streaming chunks into combined response"""
     if not chunks:
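The new `ToolCall` dataclass mirrors the shape of OpenAI's tool-call objects so that streamed tool calls no longer require importing `ChatCompletionMessageToolCall`. A minimal sketch of how such an object can be built from accumulated chunk data and turned back into the dict form used in chat messages (the values here are hypothetical, not from the diff):

```python
from dataclasses import dataclass, asdict
from typing import Any, Dict

@dataclass
class ToolCall:
    """Tool call representation compatible with OpenAI format"""
    id: str
    type: str
    function: Dict[str, Any]

# Hypothetical tool-call data accumulated from streaming chunks
tc = {
    "id": "call_123",
    "type": "function",
    "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'},
}

tool_call = ToolCall(id=tc["id"], type=tc["type"], function=tc["function"])
# asdict() (or the manual dict building agent.py does) restores the OpenAI-style dict
print(asdict(tool_call))
```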
@@ -156,9 +163,8 @@ def process_stream_chunks(chunks):
     processed_tool_calls = []
     if tool_calls:
         try:
-            from openai.types.chat import ChatCompletionMessageToolCall
             for tc in tool_calls:
-                tool_call =
+                tool_call = ToolCall(
                     id=tc["id"],
                     type=tc["type"],
                     function={
@@ -513,6 +519,9 @@ class Agent:
         self.instructions = instructions
         # Check for model name in environment variable if not provided
         self._using_custom_llm = False
+
+        # Initialize OpenAI client for direct API calls
+        self._openai_client = get_openai_client(api_key=api_key, base_url=base_url)

         # If base_url is provided, always create a custom LLM instance
         if base_url:
@@ -831,6 +840,132 @@ Your Goal: {self.goal}

         return current_response

+    def _build_messages(self, prompt, temperature=0.2, output_json=None, output_pydantic=None):
+        """Build messages list for chat completion.
+
+        Args:
+            prompt: The user prompt (str or list)
+            temperature: Temperature for the chat
+            output_json: Optional Pydantic model for JSON output
+            output_pydantic: Optional Pydantic model for JSON output (alias)
+
+        Returns:
+            tuple: (messages list, original prompt)
+        """
+        messages = []
+
+        # Build system prompt if enabled
+        system_prompt = None
+        if self.use_system_prompt:
+            system_prompt = f"""{self.backstory}\n
+Your Role: {self.role}\n
+Your Goal: {self.goal}
+            """
+            if output_json:
+                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
+            elif output_pydantic:
+                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
+
+            messages.append({"role": "system", "content": system_prompt})
+
+        # Add chat history
+        messages.extend(self.chat_history)
+
+        # Handle prompt modifications for JSON output
+        original_prompt = prompt
+        if output_json or output_pydantic:
+            if isinstance(prompt, str):
+                prompt = prompt + "\nReturn ONLY a valid JSON object. No other text or explanation."
+            elif isinstance(prompt, list):
+                # Create a deep copy to avoid modifying the original
+                prompt = copy.deepcopy(prompt)
+                for item in prompt:
+                    if item.get("type") == "text":
+                        item["text"] = item["text"] + "\nReturn ONLY a valid JSON object. No other text or explanation."
+                        break
+
+        # Add prompt to messages
+        if isinstance(prompt, list):
+            # If we receive a multimodal prompt list, place it directly in the user message
+            messages.append({"role": "user", "content": prompt})
+        else:
+            messages.append({"role": "user", "content": prompt})
+
+        return messages, original_prompt
+
+    def _format_tools_for_completion(self, tools=None):
+        """Format tools for OpenAI completion API.
+
+        Supports:
+        - Pre-formatted OpenAI tools (dicts with type='function')
+        - Lists of pre-formatted tools
+        - Callable functions
+        - String function names
+        - Objects with to_openai_tool() method
+
+        Args:
+            tools: List of tools in various formats or None to use self.tools
+
+        Returns:
+            List of formatted tools or empty list
+        """
+        if tools is None:
+            tools = self.tools
+
+        if not tools:
+            return []
+
+        formatted_tools = []
+        for tool in tools:
+            # Handle pre-formatted OpenAI tools
+            if isinstance(tool, dict) and tool.get('type') == 'function':
+                # Validate nested dictionary structure before accessing
+                if 'function' in tool and isinstance(tool['function'], dict) and 'name' in tool['function']:
+                    formatted_tools.append(tool)
+                else:
+                    logging.warning(f"Skipping malformed OpenAI tool: missing function or name")
+            # Handle lists of tools
+            elif isinstance(tool, list):
+                for subtool in tool:
+                    if isinstance(subtool, dict) and subtool.get('type') == 'function':
+                        # Validate nested dictionary structure before accessing
+                        if 'function' in subtool and isinstance(subtool['function'], dict) and 'name' in subtool['function']:
+                            formatted_tools.append(subtool)
+                        else:
+                            logging.warning(f"Skipping malformed OpenAI tool in list: missing function or name")
+            # Handle string tool names
+            elif isinstance(tool, str):
+                tool_def = self._generate_tool_definition(tool)
+                if tool_def:
+                    formatted_tools.append(tool_def)
+                else:
+                    logging.warning(f"Could not generate definition for tool: {tool}")
+            # Handle objects with to_openai_tool method (MCP tools)
+            elif hasattr(tool, "to_openai_tool"):
+                openai_tools = tool.to_openai_tool()
+                # MCP tools can return either a single tool or a list of tools
+                if isinstance(openai_tools, list):
+                    formatted_tools.extend(openai_tools)
+                elif openai_tools is not None:
+                    formatted_tools.append(openai_tools)
+            # Handle callable functions
+            elif callable(tool):
+                tool_def = self._generate_tool_definition(tool.__name__)
+                if tool_def:
+                    formatted_tools.append(tool_def)
+            else:
+                logging.warning(f"Tool {tool} not recognized")
+
+        # Validate JSON serialization before returning
+        if formatted_tools:
+            try:
+                json.dumps(formatted_tools)  # Validate serialization
+            except (TypeError, ValueError) as e:
+                logging.error(f"Tools are not JSON serializable: {e}")
+                return []
+
+        return formatted_tools
+
     def generate_task(self) -> 'Task':
         """Generate a Task object from the agent's instructions"""
         from ..task.task import Task
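The `_build_messages` helper centralizes message construction that was previously duplicated across the sync and async chat paths. A rough sketch of the list it returns for a string prompt when JSON output is requested; the role, goal, and backstory values are placeholders, not values from the package:

```python
import json
from pydantic import BaseModel

class Answer(BaseModel):
    text: str

# Approximate shape of the messages list _build_messages() assembles when
# use_system_prompt is enabled and output_json=Answer is passed.
system_prompt = (
    "You are a helpful assistant\n\nYour Role: Researcher\n\nYour Goal: Answer questions\n"
    f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(Answer.model_json_schema())}"
)
messages = [
    {"role": "system", "content": system_prompt},
    # ...chat history entries are inserted here...
    {"role": "user", "content": "What is 2+2?\nReturn ONLY a valid JSON object. No other text or explanation."},
]
print(messages[0]["role"], messages[-1]["role"])
```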
@@ -998,7 +1133,7 @@ Your Goal: {self.goal}
         """Process streaming response and return final response"""
         try:
             # Create the response stream
-            response_stream =
+            response_stream = self._openai_client.sync_client.chat.completions.create(
                 model=self.llm,
                 messages=messages,
                 temperature=temperature,
@@ -1045,26 +1180,8 @@ Your Goal: {self.goal}
         start_time = time.time()
         logging.debug(f"{self.name} sending messages to LLM: {messages}")

-
-
-        tools = self.tools
-        if tools:
-            for tool in tools:
-                if isinstance(tool, str):
-                    # Generate tool definition for string tool names
-                    tool_def = self._generate_tool_definition(tool)
-                    if tool_def:
-                        formatted_tools.append(tool_def)
-                    else:
-                        logging.warning(f"Could not generate definition for tool: {tool}")
-                elif isinstance(tool, dict):
-                    formatted_tools.append(tool)
-                elif hasattr(tool, "to_openai_tool"):
-                    formatted_tools.append(tool.to_openai_tool())
-                elif callable(tool):
-                    formatted_tools.append(self._generate_tool_definition(tool.__name__))
-                else:
-                    logging.warning(f"Tool {tool} not recognized")
+        # Use the new _format_tools_for_completion helper method
+        formatted_tools = self._format_tools_for_completion(tools)

         try:
             # Use the custom LLM instance if available
@@ -1123,7 +1240,7 @@ Your Goal: {self.goal}
             )
         else:
             # Process as regular non-streaming response
-            final_response =
+            final_response = self._openai_client.sync_client.chat.completions.create(
                 model=self.llm,
                 messages=messages,
                 temperature=temperature,
@@ -1134,15 +1251,34 @@ Your Goal: {self.goal}
         tool_calls = getattr(final_response.choices[0].message, 'tool_calls', None)

         if tool_calls:
+            # Convert ToolCall dataclass objects to dict for JSON serialization
+            serializable_tool_calls = []
+            for tc in tool_calls:
+                if isinstance(tc, ToolCall):
+                    # Convert dataclass to dict
+                    serializable_tool_calls.append({
+                        "id": tc.id,
+                        "type": tc.type,
+                        "function": tc.function
+                    })
+                else:
+                    # Already an OpenAI object, keep as is
+                    serializable_tool_calls.append(tc)
+
             messages.append({
                 "role": "assistant",
                 "content": final_response.choices[0].message.content,
-                "tool_calls":
+                "tool_calls": serializable_tool_calls
             })

             for tool_call in tool_calls:
-
-
+                # Handle both ToolCall dataclass and OpenAI object
+                if isinstance(tool_call, ToolCall):
+                    function_name = tool_call.function["name"]
+                    arguments = json.loads(tool_call.function["arguments"])
+                else:
+                    function_name = tool_call.function.name
+                    arguments = json.loads(tool_call.function.arguments)

                 if self.verbose:
                     display_tool_call(f"Agent {self.name} is calling function '{function_name}' with arguments: {arguments}")
@@ -1155,15 +1291,20 @@ Your Goal: {self.goal}

                 messages.append({
                     "role": "tool",
-                    "tool_call_id": tool_call.id,
+                    "tool_call_id": tool_call.id if hasattr(tool_call, 'id') else tool_call['id'],
                     "content": results_str
                 })

             # Check if we should continue (for tools like sequential thinking)
             should_continue = False
             for tool_call in tool_calls:
-
-
+                # Handle both ToolCall dataclass and OpenAI object
+                if isinstance(tool_call, ToolCall):
+                    function_name = tool_call.function["name"]
+                    arguments = json.loads(tool_call.function["arguments"])
+                else:
+                    function_name = tool_call.function.name
+                    arguments = json.loads(tool_call.function.arguments)

                 # For sequential thinking tool, check if nextThoughtNeeded is True
                 if function_name == "sequentialthinking" and arguments.get("nextThoughtNeeded", False):
@@ -1181,7 +1322,7 @@ Your Goal: {self.goal}
                     reasoning_steps=reasoning_steps
                 )
             else:
-                final_response =
+                final_response = self._openai_client.sync_client.chat.completions.create(
                     model=self.llm,
                     messages=messages,
                     temperature=temperature,
@@ -1297,40 +1438,8 @@ Your Goal: {self.goal}
                 display_error(f"Error in LLM chat: {e}")
                 return None
         else:
-
-
-Your Role: {self.role}\n
-Your Goal: {self.goal}
-            """
-            if output_json:
-                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
-            elif output_pydantic:
-                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
-            else:
-                system_prompt = None
-
-            messages = []
-            if system_prompt:
-                messages.append({"role": "system", "content": system_prompt})
-            messages.extend(self.chat_history)
-
-            # Modify prompt if output_json or output_pydantic is specified
-            original_prompt = prompt
-            if output_json or output_pydantic:
-                if isinstance(prompt, str):
-                    prompt += "\nReturn ONLY a valid JSON object. No other text or explanation."
-                elif isinstance(prompt, list):
-                    # For multimodal prompts, append to the text content
-                    for item in prompt:
-                        if item["type"] == "text":
-                            item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
-                            break
-
-            if isinstance(prompt, list):
-                # If we receive a multimodal prompt list, place it directly in the user message
-                messages.append({"role": "user", "content": prompt})
-            else:
-                messages.append({"role": "user", "content": prompt})
+            # Use the new _build_messages helper method
+            messages, original_prompt = self._build_messages(prompt, temperature, output_json, output_pydantic)

             final_response_text = None
             reflection_count = 0
@@ -1405,7 +1514,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             messages.append({"role": "user", "content": reflection_prompt})

             try:
-                reflection_response =
+                reflection_response = self._openai_client.sync_client.beta.chat.completions.parse(
                     model=self.reflect_llm if self.reflect_llm else self.llm,
                     messages=messages,
                     temperature=temperature,
@@ -1566,38 +1675,8 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             return None

         # For OpenAI client
-
-
-Your Role: {self.role}\n
-Your Goal: {self.goal}
-        """
-        if output_json:
-            system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
-        elif output_pydantic:
-            system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
-        else:
-            system_prompt = None
-
-        messages = []
-        if system_prompt:
-            messages.append({"role": "system", "content": system_prompt})
-        messages.extend(self.chat_history)
-
-        # Modify prompt if output_json or output_pydantic is specified
-        original_prompt = prompt
-        if output_json or output_pydantic:
-            if isinstance(prompt, str):
-                prompt += "\nReturn ONLY a valid JSON object. No other text or explanation."
-            elif isinstance(prompt, list):
-                for item in prompt:
-                    if item["type"] == "text":
-                        item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
-                        break
-
-        if isinstance(prompt, list):
-            messages.append({"role": "user", "content": prompt})
-        else:
-            messages.append({"role": "user", "content": prompt})
+        # Use the new _build_messages helper method
+        messages, original_prompt = self._build_messages(prompt, temperature, output_json, output_pydantic)

         reflection_count = 0
         start_time = time.time()
@@ -1619,27 +1698,12 @@ Your Goal: {self.goal}
                 agent_tools=agent_tools
             )

-        #
-        formatted_tools =
-        if tools:
-            for tool in tools:
-                if isinstance(tool, str):
-                    tool_def = self._generate_tool_definition(tool)
-                    if tool_def:
-                        formatted_tools.append(tool_def)
-                elif isinstance(tool, dict):
-                    formatted_tools.append(tool)
-                elif hasattr(tool, "to_openai_tool"):
-                    formatted_tools.append(tool.to_openai_tool())
-                elif callable(tool):
-                    formatted_tools.append(self._generate_tool_definition(tool.__name__))
-
-        # Create async OpenAI client
-        async_client = AsyncOpenAI()
+        # Use the new _format_tools_for_completion helper method
+        formatted_tools = self._format_tools_for_completion(tools)

         # Make the API call based on the type of request
         if tools:
-            response = await async_client.chat.completions.create(
+            response = await self._openai_client.async_client.chat.completions.create(
                 model=self.llm,
                 messages=messages,
                 temperature=temperature,
@@ -1651,7 +1715,7 @@ Your Goal: {self.goal}
             logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
             return result
         elif output_json or output_pydantic:
-            response = await async_client.chat.completions.create(
+            response = await self._openai_client.async_client.chat.completions.create(
                 model=self.llm,
                 messages=messages,
                 temperature=temperature,
@@ -1663,7 +1727,7 @@ Your Goal: {self.goal}
             logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
             return response.choices[0].message.content
         else:
-            response = await async_client.chat.completions.create(
+            response = await self._openai_client.async_client.chat.completions.create(
                 model=self.llm,
                 messages=messages,
                 temperature=temperature
@@ -1690,7 +1754,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             ]

             try:
-                reflection_response = await async_client.beta.chat.completions.parse(
+                reflection_response = await self._openai_client.async_client.beta.chat.completions.parse(
                     model=self.reflect_llm if self.reflect_llm else self.llm,
                     messages=reflection_messages,
                     temperature=temperature,
@@ -1720,7 +1784,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 {"role": "user", "content": "Now regenerate your response using the reflection you made"}
             ]

-            new_response = await async_client.chat.completions.create(
+            new_response = await self._openai_client.async_client.chat.completions.create(
                 model=self.llm,
                 messages=regenerate_messages,
                 temperature=temperature
@@ -1796,8 +1860,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 {"role": "user", "content": formatted_results + "\nPlease process these results and provide a final response."}
             ]
             try:
-
-                final_response = await async_client.chat.completions.create(
+                final_response = await self._openai_client.async_client.chat.completions.create(
                     model=self.llm,
                     messages=messages,
                     temperature=0.2,
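Throughout agent.py, the removed module-level `client` import and the ad-hoc `AsyncOpenAI()` instances are replaced by the shared `self._openai_client`, whose `sync_client`/`async_client` properties drive completions, streaming, and `beta.chat.completions.parse`. A hedged sketch of obtaining the same client directly, for example when pointing at a local LM Studio-style endpoint; the URL and model name below are illustrative only:

```python
from praisonaiagents.llm import get_openai_client

# Falls back to OPENAI_API_KEY / OPENAI_API_BASE when arguments are omitted.
client = get_openai_client(base_url="http://localhost:1234/v1", api_key="not-needed")

response = client.sync_client.chat.completions.create(
    model="gpt-4o-mini",  # example model name
    messages=[{"role": "user", "content": "Say hello"}],
    temperature=0.2,
)
print(response.choices[0].message.content)
```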
praisonaiagents/llm/__init__.py
CHANGED
@@ -20,6 +20,7 @@ logging.basicConfig(level=logging.WARNING)

 # Import after suppressing warnings
 from .llm import LLM, LLMContextLengthExceededException
+from .openai_client import OpenAIClient, get_openai_client

 # Ensure telemetry is disabled after import as well
 try:
@@ -28,4 +29,4 @@ try:
 except ImportError:
     pass

-__all__ = ["LLM", "LLMContextLengthExceededException"]
+__all__ = ["LLM", "LLMContextLengthExceededException", "OpenAIClient", "get_openai_client"]
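With the re-export in place, both the wrapper class and the singleton accessor are importable from the package's `llm` namespace. A minimal sketch, assuming `OPENAI_API_KEY` is set in the environment:

```python
from praisonaiagents.llm import LLM, OpenAIClient, get_openai_client

client = get_openai_client()  # reuses one module-level OpenAIClient instance
assert isinstance(client, OpenAIClient)
```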
praisonaiagents/llm/llm.py
CHANGED
@@ -364,6 +364,66 @@ class LLM:

         return messages, original_prompt

+    def _format_tools_for_litellm(self, tools: Optional[List[Any]]) -> Optional[List[Dict]]:
+        """Format tools for LiteLLM - handles all tool formats.
+
+        Supports:
+        - Pre-formatted OpenAI tools (dicts with type='function')
+        - Lists of pre-formatted tools
+        - Callable functions
+        - String function names
+
+        Args:
+            tools: List of tools in various formats
+
+        Returns:
+            List of formatted tools or None
+        """
+        if not tools:
+            return None
+
+        formatted_tools = []
+        for tool in tools:
+            # Check if the tool is already in OpenAI format (e.g. from MCP.to_openai_tool())
+            if isinstance(tool, dict) and 'type' in tool and tool['type'] == 'function':
+                # Validate nested dictionary structure before accessing
+                if 'function' in tool and isinstance(tool['function'], dict) and 'name' in tool['function']:
+                    logging.debug(f"Using pre-formatted OpenAI tool: {tool['function']['name']}")
+                    formatted_tools.append(tool)
+                else:
+                    logging.debug(f"Skipping malformed OpenAI tool: missing function or name")
+            # Handle lists of tools (e.g. from MCP.to_openai_tool())
+            elif isinstance(tool, list):
+                for subtool in tool:
+                    if isinstance(subtool, dict) and 'type' in subtool and subtool['type'] == 'function':
+                        # Validate nested dictionary structure before accessing
+                        if 'function' in subtool and isinstance(subtool['function'], dict) and 'name' in subtool['function']:
+                            logging.debug(f"Using pre-formatted OpenAI tool from list: {subtool['function']['name']}")
+                            formatted_tools.append(subtool)
+                        else:
+                            logging.debug(f"Skipping malformed OpenAI tool in list: missing function or name")
+            elif callable(tool):
+                tool_def = self._generate_tool_definition(tool)
+                if tool_def:
+                    formatted_tools.append(tool_def)
+            elif isinstance(tool, str):
+                tool_def = self._generate_tool_definition(tool)
+                if tool_def:
+                    formatted_tools.append(tool_def)
+            else:
+                logging.debug(f"Skipping tool of unsupported type: {type(tool)}")
+
+        # Validate JSON serialization before returning
+        if formatted_tools:
+            try:
+                import json
+                json.dumps(formatted_tools)  # Validate serialization
+            except (TypeError, ValueError) as e:
+                logging.error(f"Tools are not JSON serializable: {e}")
+                return None
+
+        return formatted_tools if formatted_tools else None
+
     def get_response(
         self,
         prompt: Union[str, List[Dict]],
@@ -445,33 +505,7 @@
             litellm.set_verbose = False

             # Format tools if provided
-            formatted_tools =
-            if tools:
-                formatted_tools = []
-                for tool in tools:
-                    # Check if the tool is already in OpenAI format (e.g. from MCP.to_openai_tool())
-                    if isinstance(tool, dict) and 'type' in tool and tool['type'] == 'function':
-                        logging.debug(f"Using pre-formatted OpenAI tool: {tool['function']['name']}")
-                        formatted_tools.append(tool)
-                    # Handle lists of tools (e.g. from MCP.to_openai_tool())
-                    elif isinstance(tool, list):
-                        for subtool in tool:
-                            if isinstance(subtool, dict) and 'type' in subtool and subtool['type'] == 'function':
-                                logging.debug(f"Using pre-formatted OpenAI tool from list: {subtool['function']['name']}")
-                                formatted_tools.append(subtool)
-                    elif callable(tool):
-                        tool_def = self._generate_tool_definition(tool.__name__)
-                        if tool_def:
-                            formatted_tools.append(tool_def)
-                    elif isinstance(tool, str):
-                        tool_def = self._generate_tool_definition(tool)
-                        if tool_def:
-                            formatted_tools.append(tool_def)
-                    else:
-                        logging.debug(f"Skipping tool of unsupported type: {type(tool)}")
-
-                if not formatted_tools:
-                    formatted_tools = None
+            formatted_tools = self._format_tools_for_litellm(tools)

             # Build messages list using shared helper
             messages, original_prompt = self._build_messages(
@@ -1202,74 +1236,8 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         start_time = time.time()
         reflection_count = 0

-        # Format tools for LiteLLM
-        formatted_tools =
-        if tools:
-            logging.debug(f"Starting tool formatting for {len(tools)} tools")
-            formatted_tools = []
-            for tool in tools:
-                logging.debug(f"Processing tool: {tool.__name__ if hasattr(tool, '__name__') else str(tool)}")
-                if hasattr(tool, '__name__'):
-                    tool_name = tool.__name__
-                    tool_doc = tool.__doc__ or "No description available"
-                    # Get function signature
-                    import inspect
-                    sig = inspect.signature(tool)
-                    logging.debug(f"Tool signature: {sig}")
-                    params = {}
-                    required = []
-                    for name, param in sig.parameters.items():
-                        logging.debug(f"Processing parameter: {name} with annotation: {param.annotation}")
-                        param_type = "string"
-                        if param.annotation != inspect.Parameter.empty:
-                            if param.annotation == int:
-                                param_type = "integer"
-                            elif param.annotation == float:
-                                param_type = "number"
-                            elif param.annotation == bool:
-                                param_type = "boolean"
-                            elif param.annotation == Dict:
-                                param_type = "object"
-                            elif param.annotation == List:
-                                param_type = "array"
-                            elif hasattr(param.annotation, "__name__"):
-                                param_type = param.annotation.__name__.lower()
-                        params[name] = {"type": param_type}
-                        if param.default == inspect.Parameter.empty:
-                            required.append(name)
-
-                    logging.debug(f"Generated parameters: {params}")
-                    logging.debug(f"Required parameters: {required}")
-
-                    tool_def = {
-                        "type": "function",
-                        "function": {
-                            "name": tool_name,
-                            "description": tool_doc,
-                            "parameters": {
-                                "type": "object",
-                                "properties": params,
-                                "required": required
-                            }
-                        }
-                    }
-                    # Ensure tool definition is JSON serializable
-                    try:
-                        json.dumps(tool_def)  # Test serialization
-                        logging.debug(f"Generated tool definition: {tool_def}")
-                        formatted_tools.append(tool_def)
-                    except TypeError as e:
-                        logging.error(f"Tool definition not JSON serializable: {e}")
-                        continue
-
-            # Validate final tools list
-            if formatted_tools:
-                try:
-                    json.dumps(formatted_tools)  # Final serialization check
-                    logging.debug(f"Final formatted tools: {json.dumps(formatted_tools, indent=2)}")
-                except TypeError as e:
-                    logging.error(f"Final tools list not JSON serializable: {e}")
-                    formatted_tools = None
+        # Format tools for LiteLLM using the shared helper
+        formatted_tools = self._format_tools_for_litellm(tools)

         response_text = ""
         if reasoning_steps:
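The removed block built OpenAI-style function schemas inline from a callable's signature; that logic now sits behind `_format_tools_for_litellm` and `_generate_tool_definition`. For reference, a tool definition in the format both paths produce looks roughly like this; the function and field values below are illustrative:

```python
def get_stock_price(symbol: str, days: int) -> dict:
    """Get the recent price history for a stock symbol."""
    ...

# Approximate schema derived from the signature above
tool_def = {
    "type": "function",
    "function": {
        "name": "get_stock_price",
        "description": "Get the recent price history for a stock symbol.",
        "parameters": {
            "type": "object",
            "properties": {
                "symbol": {"type": "string"},
                "days": {"type": "integer"},
            },
            "required": ["symbol", "days"],
        },
    },
}
```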
@@ -2070,36 +2038,44 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             display_error(f"Error in response_async: {str(error)}")
             raise

-    def _generate_tool_definition(self,
-        """Generate a tool definition from a function name."""
-
-
-
-
-
-
-
-
-
-
-
-
-
-            logging.debug(f"
-
+    def _generate_tool_definition(self, function_or_name) -> Optional[Dict]:
+        """Generate a tool definition from a function or function name."""
+        if callable(function_or_name):
+            # Function object passed directly
+            func = function_or_name
+            function_name = func.__name__
+            logging.debug(f"Generating tool definition for callable: {function_name}")
+        else:
+            # Function name string passed
+            function_name = function_or_name
+            logging.debug(f"Attempting to generate tool definition for: {function_name}")
+
+        # First try to get the tool definition if it exists
+        tool_def_name = f"{function_name}_definition"
+        tool_def = globals().get(tool_def_name)
+        logging.debug(f"Looking for {tool_def_name} in globals: {tool_def is not None}")
+
+        if not tool_def:
+            import __main__
+            tool_def = getattr(__main__, tool_def_name, None)
+            logging.debug(f"Looking for {tool_def_name} in __main__: {tool_def is not None}")
+
+        if tool_def:
+            logging.debug(f"Found tool definition: {tool_def}")
+            return tool_def

-
-
-
-
-
-
-
-
-
-
-
-
+        # Try to find the function
+        func = globals().get(function_name)
+        logging.debug(f"Looking for {function_name} in globals: {func is not None}")
+
+        if not func:
+            import __main__
+            func = getattr(__main__, function_name, None)
+            logging.debug(f"Looking for {function_name} in __main__: {func is not None}")
+
+        if not func or not callable(func):
+            logging.debug(f"Function {function_name} not found or not callable")
+            return None

         import inspect
         # Handle Langchain and CrewAI tools
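As the rewritten `_generate_tool_definition` shows, a string tool name is first resolved by looking for a matching `<name>_definition` dict in `globals()` or `__main__` before falling back to signature inspection. A hedged sketch of that convention; the tool name and schema are examples, not part of the package:

```python
# In the calling script (__main__), a hand-written schema can override inspection:
def search_web(query: str) -> list:
    """Search the web and return result snippets."""
    return []

search_web_definition = {
    "type": "function",
    "function": {
        "name": "search_web",
        "description": "Search the web and return result snippets.",
        "parameters": {
            "type": "object",
            "properties": {"query": {"type": "string"}},
            "required": ["query"],
        },
    },
}
# llm._generate_tool_definition("search_web") would return search_web_definition
# if it is visible in globals() or __main__; otherwise it inspects search_web itself.
```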
praisonaiagents/llm/openai_client.py
ADDED
@@ -0,0 +1,260 @@
+"""
+OpenAI Client Module
+
+This module provides a unified interface for OpenAI API interactions,
+supporting both synchronous and asynchronous operations.
+"""
+
+import os
+import logging
+from typing import Any, Dict, List, Optional, Union, AsyncIterator, Iterator
+from openai import OpenAI, AsyncOpenAI
+from openai.types.chat import ChatCompletionChunk
+import asyncio
+from pydantic import BaseModel
+
+# Constants
+LOCAL_SERVER_API_KEY_PLACEHOLDER = "not-needed"
+
+class OpenAIClient:
+    """
+    Unified OpenAI client wrapper for sync/async operations.
+
+    This class encapsulates all OpenAI-specific logic, providing a clean
+    interface for chat completions, streaming, and structured outputs.
+    """
+
+    def __init__(self, api_key: Optional[str] = None, base_url: Optional[str] = None):
+        """
+        Initialize the OpenAI client with proper API key handling.
+
+        Args:
+            api_key: OpenAI API key (defaults to OPENAI_API_KEY env var)
+            base_url: Custom base URL for API endpoints (defaults to OPENAI_API_BASE env var)
+        """
+        # Use provided values or fall back to environment variables
+        self.api_key = api_key or os.environ.get("OPENAI_API_KEY")
+        self.base_url = base_url or os.environ.get("OPENAI_API_BASE") or os.environ.get("OPENAI_BASE_URL")
+
+        # For local servers like LM Studio, allow minimal API key
+        if self.base_url and not self.api_key:
+            self.api_key = LOCAL_SERVER_API_KEY_PLACEHOLDER
+        elif not self.api_key:
+            raise ValueError(
+                "OPENAI_API_KEY environment variable is required for the default OpenAI service. "
+                "If you are targeting a local server (e.g., LM Studio), ensure OPENAI_API_BASE is set "
+                f"(e.g., 'http://localhost:1234/v1') and you can use a placeholder API key by setting OPENAI_API_KEY='{LOCAL_SERVER_API_KEY_PLACEHOLDER}'"
+            )
+
+        # Initialize synchronous client (lazy loading for async)
+        self._sync_client = OpenAI(api_key=self.api_key, base_url=self.base_url)
+        self._async_client = None
+
+        # Set up logging
+        self.logger = logging.getLogger(__name__)
+
+    @property
+    def sync_client(self) -> OpenAI:
+        """Get the synchronous OpenAI client."""
+        return self._sync_client
+
+    @property
+    def async_client(self) -> AsyncOpenAI:
+        """Get the asynchronous OpenAI client (lazy initialization)."""
+        if self._async_client is None:
+            self._async_client = AsyncOpenAI(api_key=self.api_key, base_url=self.base_url)
+        return self._async_client
+
+    def create_completion(
+        self,
+        messages: List[Dict[str, Any]],
+        model: str = "gpt-4o",
+        temperature: float = 0.7,
+        stream: bool = False,
+        tools: Optional[List[Dict[str, Any]]] = None,
+        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+        **kwargs
+    ) -> Union[Any, Iterator[ChatCompletionChunk]]:
+        """
+        Create a chat completion using the synchronous client.
+
+        Args:
+            messages: List of message dictionaries
+            model: Model to use for completion
+            temperature: Sampling temperature
+            stream: Whether to stream the response
+            tools: List of tools/functions available
+            tool_choice: Tool selection preference
+            **kwargs: Additional parameters to pass to the API
+
+        Returns:
+            ChatCompletion object or stream iterator
+        """
+        params = {
+            "model": model,
+            "messages": messages,
+            "temperature": temperature,
+            "stream": stream,
+            **kwargs
+        }
+
+        # Add tools if provided
+        if tools:
+            params["tools"] = tools
+            if tool_choice is not None:
+                params["tool_choice"] = tool_choice
+
+        try:
+            return self._sync_client.chat.completions.create(**params)
+        except Exception as e:
+            self.logger.error(f"Error creating completion: {e}")
+            raise
+
+    async def acreate_completion(
+        self,
+        messages: List[Dict[str, Any]],
+        model: str = "gpt-4o",
+        temperature: float = 0.7,
+        stream: bool = False,
+        tools: Optional[List[Dict[str, Any]]] = None,
+        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+        **kwargs
+    ) -> Union[Any, AsyncIterator[ChatCompletionChunk]]:
+        """
+        Create a chat completion using the asynchronous client.
+
+        Args:
+            messages: List of message dictionaries
+            model: Model to use for completion
+            temperature: Sampling temperature
+            stream: Whether to stream the response
+            tools: List of tools/functions available
+            tool_choice: Tool selection preference
+            **kwargs: Additional parameters to pass to the API
+
+        Returns:
+            ChatCompletion object or async stream iterator
+        """
+        params = {
+            "model": model,
+            "messages": messages,
+            "temperature": temperature,
+            "stream": stream,
+            **kwargs
+        }
+
+        # Add tools if provided
+        if tools:
+            params["tools"] = tools
+            if tool_choice is not None:
+                params["tool_choice"] = tool_choice
+
+        try:
+            return await self.async_client.chat.completions.create(**params)
+        except Exception as e:
+            self.logger.error(f"Error creating async completion: {e}")
+            raise
+
+    def parse_structured_output(
+        self,
+        messages: List[Dict[str, Any]],
+        response_format: BaseModel,
+        model: str = "gpt-4o",
+        temperature: float = 0.7,
+        **kwargs
+    ) -> Any:
+        """
+        Parse structured output using the beta.chat.completions.parse API.
+
+        Args:
+            messages: List of message dictionaries
+            response_format: Pydantic model for response validation
+            model: Model to use for completion
+            temperature: Sampling temperature
+            **kwargs: Additional parameters to pass to the API
+
+        Returns:
+            Parsed response according to the response_format
+        """
+        try:
+            response = self._sync_client.beta.chat.completions.parse(
+                model=model,
+                messages=messages,
+                temperature=temperature,
+                response_format=response_format,
+                **kwargs
+            )
+            return response.choices[0].message.parsed
+        except Exception as e:
+            self.logger.error(f"Error parsing structured output: {e}")
+            raise
+
+    async def aparse_structured_output(
+        self,
+        messages: List[Dict[str, Any]],
+        response_format: BaseModel,
+        model: str = "gpt-4o",
+        temperature: float = 0.7,
+        **kwargs
+    ) -> Any:
+        """
+        Parse structured output using the async beta.chat.completions.parse API.
+
+        Args:
+            messages: List of message dictionaries
+            response_format: Pydantic model for response validation
+            model: Model to use for completion
+            temperature: Sampling temperature
+            **kwargs: Additional parameters to pass to the API
+
+        Returns:
+            Parsed response according to the response_format
+        """
+        try:
+            response = await self.async_client.beta.chat.completions.parse(
+                model=model,
+                messages=messages,
+                temperature=temperature,
+                response_format=response_format,
+                **kwargs
+            )
+            return response.choices[0].message.parsed
+        except Exception as e:
+            self.logger.error(f"Error parsing async structured output: {e}")
+            raise
+
+    def close(self):
+        """Close the OpenAI clients."""
+        if hasattr(self._sync_client, 'close'):
+            self._sync_client.close()
+        if self._async_client and hasattr(self._async_client, 'close'):
+            self._async_client.close()
+
+    async def aclose(self):
+        """Asynchronously close the OpenAI clients."""
+        if hasattr(self._sync_client, 'close'):
+            await asyncio.to_thread(self._sync_client.close)
+        if self._async_client and hasattr(self._async_client, 'aclose'):
+            await self._async_client.aclose()
+
+
+# Global client instance (similar to main.py pattern)
+_global_client = None
+
+def get_openai_client(api_key: Optional[str] = None, base_url: Optional[str] = None) -> OpenAIClient:
+    """
+    Get or create a global OpenAI client instance.
+
+    Args:
+        api_key: OpenAI API key (defaults to OPENAI_API_KEY env var)
+        base_url: Custom base URL for API endpoints
+
+    Returns:
+        OpenAIClient instance
+    """
+    global _global_client
+
+    if _global_client is None:
+        _global_client = OpenAIClient(api_key=api_key, base_url=base_url)
+
+    return _global_client
{praisonaiagents-0.0.112.dist-info → praisonaiagents-0.0.114.dist-info}/RECORD
CHANGED
@@ -3,7 +3,7 @@ praisonaiagents/approval.py,sha256=UJ4OhfihpFGR5CAaMphqpSvqdZCHi5w2MGw1MByZ1FQ,9
 praisonaiagents/main.py,sha256=_-XE7_Y7ChvtLQMivfNFrrnAhv4wSSDhH9WJMWlkS0w,16315
 praisonaiagents/session.py,sha256=d-CZPYikOHb0q-H9f_IWKJsypnQfz1YKeLLkyxs6oDo,15532
 praisonaiagents/agent/__init__.py,sha256=IhIDtAkfJ99cxbttwou52coih_AejS2-jpazsX6LbDY,350
-praisonaiagents/agent/agent.py,sha256=
+praisonaiagents/agent/agent.py,sha256=a5SxvqzNi2m5ralng0vTBT0zdEDicQmq0lEvFJKxIgM,116873
 praisonaiagents/agent/handoff.py,sha256=Saq0chqfvC6Zf5UbXvmctybbehqnotrXn72JsS-76Q0,13099
 praisonaiagents/agent/image_agent.py,sha256=-5MXG594HVwSpFMcidt16YBp7udtik-Cp7eXlzLE1fY,8696
 praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
@@ -15,8 +15,9 @@ praisonaiagents/guardrails/llm_guardrail.py,sha256=MTTqmYDdZX-18QN9T17T5P_6H2qnV
 praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9bge0Ujuto,246
 praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
 praisonaiagents/knowledge/knowledge.py,sha256=OKPar-XGyAp1ndmbOOdCgqFnTCqpOThYVSIZRxZyP58,15683
-praisonaiagents/llm/__init__.py,sha256=
-praisonaiagents/llm/llm.py,sha256=
+praisonaiagents/llm/__init__.py,sha256=mWzKHtjcpmUl20HMdjg_Gbyb3-sPHxmiS2LGluVKB3A,1021
+praisonaiagents/llm/llm.py,sha256=z7o4tlKO0NJCqaXlnlwtPT768YjAB6tqNe_lg2KMTkk,111271
+praisonaiagents/llm/openai_client.py,sha256=uzSeM6OtBoEjrjBLPjbRBt7sPwCz-kwl6Yk8rBE_6Mo,9205
 praisonaiagents/mcp/__init__.py,sha256=ibbqe3_7XB7VrIcUcetkZiUZS1fTVvyMy_AqCSFG8qc,240
 praisonaiagents/mcp/mcp.py,sha256=-fFx4MHffnN2woLnnV7Pzx3-1SFkn2j8Gp5F5ZIwKJ0,19698
 praisonaiagents/mcp/mcp_sse.py,sha256=z8TMFhW9xuLQ7QnpOa3n1-nSHt0-Bf27qso0u4qxYSY,8357
@@ -52,7 +53,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.
-praisonaiagents-0.0.
-praisonaiagents-0.0.
-praisonaiagents-0.0.
+praisonaiagents-0.0.114.dist-info/METADATA,sha256=zX76iv2EO8DxcWXRGZ244CVqllOUBqiRabTTCom27DM,1669
+praisonaiagents-0.0.114.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+praisonaiagents-0.0.114.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.114.dist-info/RECORD,,

{praisonaiagents-0.0.112.dist-info → praisonaiagents-0.0.114.dist-info}/WHEEL
File without changes

{praisonaiagents-0.0.112.dist-info → praisonaiagents-0.0.114.dist-info}/top_level.txt
File without changes