meshagent-openai 0.0.3__tar.gz → 0.0.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of meshagent-openai might be problematic.
- {meshagent_openai-0.0.3/meshagent_openai.egg-info → meshagent_openai-0.0.4}/PKG-INFO +4 -4
- {meshagent_openai-0.0.3 → meshagent_openai-0.0.4}/meshagent/openai/tools/completions_adapter.py +7 -6
- meshagent_openai-0.0.4/meshagent/openai/tools/responses_adapter.py +538 -0
- {meshagent_openai-0.0.3 → meshagent_openai-0.0.4/meshagent_openai.egg-info}/PKG-INFO +4 -4
- meshagent_openai-0.0.4/meshagent_openai.egg-info/requires.txt +7 -0
- {meshagent_openai-0.0.3 → meshagent_openai-0.0.4}/setup.py +3 -3
- meshagent_openai-0.0.4/version.py +1 -0
- meshagent_openai-0.0.3/meshagent/openai/tools/responses_adapter.py +0 -388
- meshagent_openai-0.0.3/meshagent_openai.egg-info/requires.txt +0 -7
- meshagent_openai-0.0.3/version.py +0 -1
- {meshagent_openai-0.0.3 → meshagent_openai-0.0.4}/LICENSE +0 -0
- {meshagent_openai-0.0.3 → meshagent_openai-0.0.4}/MANIFEST.in +0 -0
- {meshagent_openai-0.0.3 → meshagent_openai-0.0.4}/README.md +0 -0
- {meshagent_openai-0.0.3 → meshagent_openai-0.0.4}/meshagent/openai/__init__.py +0 -0
- {meshagent_openai-0.0.3 → meshagent_openai-0.0.4}/meshagent/openai/tools/__init__.py +0 -0
- {meshagent_openai-0.0.3 → meshagent_openai-0.0.4}/meshagent_openai.egg-info/SOURCES.txt +0 -0
- {meshagent_openai-0.0.3 → meshagent_openai-0.0.4}/meshagent_openai.egg-info/dependency_links.txt +0 -0
- {meshagent_openai-0.0.3 → meshagent_openai-0.0.4}/meshagent_openai.egg-info/top_level.txt +0 -0
- {meshagent_openai-0.0.3 → meshagent_openai-0.0.4}/pyproject.toml +0 -0
- {meshagent_openai-0.0.3 → meshagent_openai-0.0.4}/setup.cfg +0 -0
{meshagent_openai-0.0.3/meshagent_openai.egg-info → meshagent_openai-0.0.4}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: meshagent-openai
-Version: 0.0.3
+Version: 0.0.4
 Summary: OpenAI Building Blocks for Meshagent
 Home-page:
 License: Apache License 2.0
@@ -14,9 +14,9 @@ Requires-Dist: pyjwt>=2.0.0
 Requires-Dist: pytest>=8.3.4
 Requires-Dist: pytest-asyncio>=0.24.0
 Requires-Dist: openai>=1.66.2
-Requires-Dist: meshagent-api>=0.0.3
-Requires-Dist: meshagent-agents>=0.0.3
-Requires-Dist: meshagent-tools>=0.0.3
+Requires-Dist: meshagent-api>=0.0.4
+Requires-Dist: meshagent-agents>=0.0.4
+Requires-Dist: meshagent-tools>=0.0.4
 Dynamic: description-content-type
 Dynamic: license
 Dynamic: project-url
{meshagent_openai-0.0.3 → meshagent_openai-0.0.4}/meshagent/openai/tools/completions_adapter.py
RENAMED
@@ -4,7 +4,7 @@ from meshagent.api import WebSocketClientProtocol, RoomClient, RoomException
 from meshagent.tools.blob import Blob, BlobStorage
 from meshagent.tools import Toolkit, ToolContext, Tool
 from meshagent.api.messaging import Response, LinkResponse, FileResponse, JsonResponse, TextResponse, EmptyResponse
-from meshagent.agents.schema import prompt_schema
+from meshagent.api.schema_util import prompt_schema
 from meshagent.agents.adapter import ToolResponseAdapter, LLMAdapter
 from uuid import uuid4
 import json
@@ -198,7 +198,7 @@ class OpenAICompletionsToolResponseAdapter(ToolResponseAdapter):
         else:
             raise Exception("unexpected return type: {type}".format(type=type(response)))

-    async def append_messages(self, *, context: AgentChatContext, tool_call: Any, room: RoomClient, response: Response):
+    async def create_messages(self, *, context: AgentChatContext, tool_call: Any, room: RoomClient, response: Response) -> list:

         message = {
             "role" : "tool",
@@ -209,7 +209,7 @@ class OpenAICompletionsToolResponseAdapter(ToolResponseAdapter):

         room.developer.log_nowait(type="llm.message", data={ "context" : context.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "message" : message })

-        context.messages.append(message)
+        return [ message ]
@@ -330,16 +330,17 @@ class OpenAICompletionsAdapter(LLMAdapter):
                             )
                             tool_response = await tool_bundle.execute(context=tool_context, tool_call=tool_call)
                             logger.info(f"tool response {tool_response}")
-                            await tool_adapter.append_messages(context=context, tool_call=tool_call, room=room, response=tool_response)
+                            return await tool_adapter.create_messages(context=context, tool_call=tool_call, room=room, response=tool_response)
+
                         except Exception as e:
                             logger.error(f"unable to complete tool call {tool_call}", exc_info=e)
                             room.developer.log_nowait(type="llm.error", data={ "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "error" : f"{e}" })

-                            return {
+                            return [{
                                 "role" : "tool",
                                 "content" : json.dumps({"error":f"unable to complete tool call: {e}"}),
                                 "tool_call_id" : tool_call.id,
-                            }
+                            }]

                 for tool_call in message.tool_calls:
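The substantive change in this file is the tool response adapter contract: the adapter now builds and returns a list of tool messages (create_messages) instead of appending them to the chat context itself (append_messages). A minimal sketch of how a caller consumes the new contract (the driver loop here is illustrative, not from the package):

    # hypothetical driver loop showing the new create_messages contract:
    # the adapter returns tool messages, and the caller owns context mutation
    messages = await tool_adapter.create_messages(
        context=context,
        tool_call=tool_call,
        room=room,
        response=tool_response,
    )
    for m in messages:
        context.messages.append(m)
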
meshagent_openai-0.0.4/meshagent/openai/tools/responses_adapter.py
ADDED
@@ -0,0 +1,538 @@
+
+from meshagent.agents.agent import Agent, AgentChatContext, AgentCallContext
+from meshagent.api import WebSocketClientProtocol, RoomClient, RoomException
+from meshagent.tools.blob import Blob, BlobStorage
+from meshagent.tools import Toolkit, ToolContext, Tool
+from meshagent.api.messaging import Response, LinkResponse, FileResponse, JsonResponse, TextResponse, EmptyResponse, RawOutputs
+from meshagent.api.schema_util import prompt_schema
+from meshagent.agents.adapter import ToolResponseAdapter, LLMAdapter
+from uuid import uuid4
+import json
+from jsonschema import validate
+from typing import List, Dict
+
+from openai import AsyncOpenAI, APIStatusError, NOT_GIVEN
+from openai.types.responses import ResponseFunctionToolCall, ResponseStreamEvent
+
+from copy import deepcopy
+from abc import abstractmethod, ABC
+import os
+import jsonschema
+from typing import Optional, Any, Callable
+
+import logging
+import re
+import asyncio
+
+logging.basicConfig()
+logger = logging.getLogger("openai_agent")
+logger.setLevel(logging.INFO)
+
+
+
+def _replace_non_matching(text: str, allowed_chars: str, replacement: str) -> str:
+    """
+    Replaces every character in `text` that does not match the given
+    `allowed_chars` regex set with `replacement`.
+
+    Parameters:
+    -----------
+    text : str
+        The input string on which the replacement is to be done.
+    allowed_chars : str
+        A string defining the set of allowed characters (part of a character set).
+        For example, "a-zA-Z0-9" will keep only letters and digits.
+    replacement : str
+        The string to replace non-matching characters with.
+
+    Returns:
+    --------
+    str
+        A new string where all characters not in `allowed_chars` are replaced.
+    """
+    # Build a regex that matches any character NOT in allowed_chars
+    pattern = rf"[^{allowed_chars}]"
+    return re.sub(pattern, replacement, text)
+
+def safe_tool_name(name: str):
+    return _replace_non_matching(name, "a-zA-Z0-9_-", "_")
+
+# Collects a group of tool proxies and manages execution of openai tool calls
+class ResponsesToolBundle:
+    def __init__(self, toolkits: List[Toolkit]):
+        self._toolkits = toolkits
+        self._executors = dict[str, Toolkit]()
+        self._safe_names = {}
+        self._tools_by_name = {}
+
+        open_ai_tools = []
+
+        for toolkit in toolkits:
+            for v in toolkit.tools:
+
+                k = v.name
+
+                name = safe_tool_name(k)
+
+                if k in self._executors:
+                    raise Exception(f"duplicate in bundle '{k}', tool names must be unique.")
+
+                self._executors[k] = toolkit
+
+                self._safe_names[name] = k
+                self._tools_by_name[name] = v
+
+                if v.name != "computer_call":
+
+                    fn = {
+                        "type" : "function",
+                        "name" : name,
+                        "description" : v.description,
+                        "parameters" : {
+                            **v.input_schema,
+                        },
+                        "strict": True,
+                    }
+
+
+                    if v.defs != None:
+                        fn["parameters"]["$defs"] = v.defs
+
+                    open_ai_tools.append(fn)
+
+                else:
+
+                    open_ai_tools.append(v.options)
+
+        if len(open_ai_tools) == 0:
+            open_ai_tools = None
+
+        self._open_ai_tools = open_ai_tools
+
+    async def execute(self, *, context: ToolContext, tool_call: ResponseFunctionToolCall) -> Response:
+        try:
+
+            name = tool_call.name
+            arguments = json.loads(tool_call.arguments)
+
+            if name not in self._safe_names:
+                raise RoomException(f"Invalid tool name {name}, check the name of the tool")
+
+            name = self._safe_names[name]
+
+            if name not in self._executors:
+                raise Exception(f"Unregistered tool name {name}")
+
+            logger.info("executing %s %s %s", tool_call.id, name, arguments)
+
+            proxy = self._executors[name]
+            result = await proxy.execute(context=context, name=name, arguments=arguments)
+            logger.info("success calling %s %s %s", tool_call.id, name, result)
+            return result
+
+        except Exception as e:
+            logger.error("failed calling %s %s", tool_call.id, name, exc_info=e)
+            raise
+
+    def get_tool(self, name: str) -> Tool | None:
+        return self._tools_by_name.get(name, None)
+
+    def contains(self, name: str) -> bool:
+        return name in self._open_ai_tools
+
+    def to_json(self) -> List[dict] | None:
+        if self._open_ai_tools == None:
+            return None
+        return self._open_ai_tools.copy()
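Given the construction above, to_json() yields the list handed to the Responses API's tools parameter. For a hypothetical tool named "documents.search", the emitted entry would look roughly like:

    {
      "type": "function",
      "name": "documents_search",
      "description": "search the shared documents",
      "parameters": { ...the tool's input_schema, plus "$defs" when the tool defines them... },
      "strict": true
    }

Tools named "computer_call" are special-cased: they contribute their raw options dict instead of a generated function schema.
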
+
+
+# Converts a tool response into a series of messages that can be inserted into the openai context
+class OpenAIResponsesToolResponseAdapter(ToolResponseAdapter):
+    def __init__(self, blob_storage: Optional[BlobStorage] = None):
+        self._blob_storage = blob_storage
+        pass
+
+    async def to_plain_text(self, *, room: RoomClient, response: Response) -> str:
+        if isinstance(response, LinkResponse):
+            return json.dumps({
+                "name" : response.name,
+                "url" : response.url,
+            })
+
+        elif isinstance(response, JsonResponse):
+
+            return json.dumps(response.json)
+
+        elif isinstance(response, TextResponse):
+            return response.text
+
+        elif isinstance(response, FileResponse):
+
+            blob = Blob(mime_type=response.mime_type, data=response.data)
+            uri = self._blob_storage.store(blob=blob)
+
+            return f"The results have been written to a blob with the uri {uri} with the mime type {blob.mime_type}."
+
+        elif isinstance(response, EmptyResponse):
+            return "ok"
+
+        #elif isinstance(response, ImageResponse):
+        #    context.messages.append({
+        #        "role" : "assistant",
+        #        "content" : "the user will upload the image",
+        #        "tool_call_id" : tool_call.id,
+        #    })
+        #    context.messages.append({
+        #        "role" : "user",
+        #        "content" : [
+        #            { "type" : "text", "text": "this is the image from tool call id {tool_call.id}" },
+        #            { "type" : "image_url", "image_url": {"url": response.url, "detail": "auto"} }
+        #        ]
+        #    })
+
+
+        elif isinstance(response, dict):
+            return json.dumps(response)
+
+        elif isinstance(response, str):
+            return response
+
+        elif response == None:
+            return "ok"
+
+        else:
+            raise Exception("unexpected return type: {type}".format(type=type(response)))
+
+    async def create_messages(self, *, context: AgentChatContext, tool_call: ResponseFunctionToolCall, room: RoomClient, response: Response) -> list:
+
+        if isinstance(response, RawOutputs):
+
+            for output in response.outputs:
+
+                room.developer.log_nowait(type="llm.message", data={ "context" : context.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "message" : output })
+
+            return response.outputs
+        else:
+            output = await self.to_plain_text(room=room, response=response)
+
+            message = {
+                "output" : output,
+                "call_id" : tool_call.call_id,
+                "type" : "function_call_output"
+            }
+
+
+            room.developer.log_nowait(type="llm.message", data={ "context" : context.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "message" : message })
+
+            return [ message ]
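So for an ordinary text-style response the adapter produces exactly one Responses-API input item that ties the tool output back to its call id, e.g. (illustrative values):

    { "type": "function_call_output", "call_id": "call_abc123", "output": "42 results found" }

RawOutputs responses bypass to_plain_text entirely and are spliced into the conversation verbatim.
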
+
+
+
+class OpenAIResponsesAdapter(LLMAdapter[ResponsesToolBundle]):
+    def __init__(self,
+        model: str = os.getenv("OPENAI_MODEL"),
+        parallel_tool_calls : Optional[bool] = None,
+        client: Optional[AsyncOpenAI] = None,
+        reasoning_effort: Optional[str] = None,
+        retries : int = 10,
+    ):
+        self._model = model
+        self._parallel_tool_calls = parallel_tool_calls
+        self._client = client
+        self._reasoning_effort = reasoning_effort
+        self._retries = retries
+
+    def create_chat_context(self):
+        system_role = "system"
+        if self._model.startswith("o1"):
+            system_role = "developer"
+        elif self._model.startswith("o3"):
+            system_role = "developer"
+        elif self._model.startswith("computer-use"):
+            system_role = "developer"
+
+        context = AgentChatContext(
+            system_role=system_role
+        )
+
+        return context
+
+    async def check_for_termination(self, *, context: AgentChatContext, room: RoomClient) -> bool:
+
+        if len(context.previous_messages) > 0:
+            last_message = context.previous_messages[-1]
+            logger.info(f"last_message {last_message}")
+
+        for message in context.messages:
+
+            if message.get("type", "message") != "message":
+                logger.info(f"found {message.get("type", "message")}")
+
+                return False
+
+        return True
+
+    def _get_client(self, *, room: RoomClient) -> AsyncOpenAI:
+        if self._client != None:
+            openai = self._client
+        else:
+            token : str = room.protocol.token
+            url : str = room.room_url
+
+            room_proxy_url = f"{url}/v1"
+
+            openai=AsyncOpenAI(
+                api_key=token,
+                base_url=room_proxy_url,
+                default_headers={
+                    "Meshagent-Session" : room.session_id
+                }
+            )
+
+        return openai
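When no client is injected, the adapter talks to OpenAI through the room's /v1 proxy, authenticating with the room token rather than an OpenAI API key. To bypass the proxy, a caller can inject its own client; a sketch, assuming OPENAI_API_KEY is set in the environment and the model name is illustrative:

    adapter = OpenAIResponsesAdapter(
        model="gpt-4o",        # illustrative model name
        client=AsyncOpenAI(),  # reads OPENAI_API_KEY from the environment
    )
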
+
+    # Takes the current chat context, executes a completion request and processes the response.
+    # If a tool calls are requested, invokes the tools, processes the tool calls results, and appends the tool call results to the context
+    async def next(self,
+        *,
+        context: AgentChatContext,
+        room: RoomClient,
+        toolkits: Toolkit,
+        tool_adapter: Optional[ToolResponseAdapter] = None,
+        output_schema: Optional[dict] = None,
+        event_handler: Optional[Callable[[ResponseStreamEvent],None]] = None
+    ):
+        if tool_adapter == None:
+            tool_adapter = OpenAIResponsesToolResponseAdapter()
+
+        try:
+
+            openai = self._get_client(room=room)
+
+            tool_bundle = ResponsesToolBundle(toolkits=[
+                *toolkits,
+            ])
+            open_ai_tools = tool_bundle.to_json()
+
+            if open_ai_tools != None:
+                logger.info("OpenAI Tools: %s", json.dumps(open_ai_tools))
+            else:
+                logger.info("OpenAI Tools: Empty")
+                open_ai_tools = NOT_GIVEN
+
+            response_schema = output_schema
+            response_name = "response"
+
+
+            while True:
+
+                logger.info("model: %s, context: %s, output_schema: %s", self._model, context.messages, output_schema)
+                ptc = self._parallel_tool_calls
+                extra = {}
+                if ptc != None and self._model.startswith("o") == False:
+                    extra["parallel_tool_calls"] = ptc
+
+                trunc = NOT_GIVEN
+                if self._model == "computer-use-preview":
+                    trunc = "auto"
+
+                text = NOT_GIVEN
+                if output_schema != None:
+                    text = {
+                        "format" : {
+                            "type" : "json_schema",
+                            "name" : response_name,
+                            "schema" : response_schema,
+                            "strict" : True,
+                        }
+                    }
+
+                reasoning = NOT_GIVEN
+                if self._reasoning_effort != None:
+                    reasoning = {
+                        "effort" : self._reasoning_effort
+                    }
+
+                previous_response_id = NOT_GIVEN
+                if context.previous_response_id != None:
+                    previous_response_id = context.previous_response_id
+
+                stream = event_handler != None
+
+                for i in range(self._retries + 1):
+                    if range == self._retries:
+                        raise RoomException("exceeded maximum attempts calling openai")
+                    try:
+                        response = await openai.responses.create(
+                            stream=stream,
+                            model = self._model,
+                            input = context.messages,
+                            tools = open_ai_tools,
+                            text = text,
+                            previous_response_id=previous_response_id,
+                            reasoning=reasoning,
+                            truncation=trunc
+                        )
+                        break
+                    except Exception as e:
+                        logger.error(f"error calling openai attempt: {i+1}", exc_info=e)
+                        if i == self._retries:
+                            raise
+
+
+                async def handle_message(message):
+
+
+                    if message.type == "function_call":
+
+                        tasks = []
+
+                        async def do_tool_call(tool_call: ResponseFunctionToolCall):
+                            try:
+                                tool_context = ToolContext(
+                                    room=room,
+                                    caller=room.local_participant,
+                                )
+                                tool_response = await tool_bundle.execute(context=tool_context, tool_call=tool_call)
+                                logger.info(f"tool response {tool_response}")
+                                return await tool_adapter.create_messages(context=context, tool_call=tool_call, room=room, response=tool_response)
+                            except Exception as e:
+                                logger.error(f"unable to complete tool call {tool_call}", exc_info=e)
+                                room.developer.log_nowait(type="llm.error", data={ "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "error" : f"{e}" })
+
+                                return [{
+                                    "output" : json.dumps({"error":f"unable to complete tool call: {e}"}),
+                                    "call_id" : tool_call.call_id,
+                                    "type" : "function_call_output"
+                                }]
+
+
+                        tasks.append(asyncio.create_task(do_tool_call(message)))
+
+                        results = await asyncio.gather(*tasks)
+
+                        all_results = []
+                        for result in results:
+                            room.developer.log_nowait(type="llm.message", data={ "context" : context.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "message" : result })
+                            all_results.extend(result)
+
+                        return all_results, False
+
+                    elif message.type == "computer_call" and tool_bundle.get_tool("computer_call"):
+                        tool_context = ToolContext(
+                            room=room,
+                            caller=room.local_participant,
+                        )
+                        outputs = (await tool_bundle.get_tool("computer_call").execute(context=tool_context, arguments=message.to_dict(mode="json"))).outputs
+
+                        return outputs, False
+
+                    elif message.type == "reasoning":
+                        reasoning = tool_bundle.get_tool("reasoning_tool")
+                        if reasoning != None:
+                            await tool_bundle.get_tool("reasoning_tool").execute(context=tool_context, arguments=message.to_dict(mode="json"))
+
+                    elif message.type == "message":
+
+                        contents = message.content
+                        if response_schema == None:
+                            return [], False
+                        else:
+                            for content in contents:
+                                # First try to parse the result
+                                try:
+                                    full_response = json.loads(content.text)
+                                # sometimes open ai packs two JSON chunks seperated by newline, check if that's why we couldn't parse
+                                except json.decoder.JSONDecodeError as e:
+                                    for part in content.text.splitlines():
+                                        if len(part.strip()) > 0:
+                                            full_response = json.loads(part)
+
+                                try:
+                                    self.validate(response=full_response, output_schema=response_schema)
+                                except Exception as e:
+                                    logger.error("recieved invalid response, retrying", exc_info=e)
+                                    error = { "role" : "user", "content" : "encountered a validation error with the output: {error}".format(error=e)}
+                                    room.developer.log_nowait(type="llm.message", data={ "context" : message.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "message" : error })
+                                    context.messages.append(error)
+                                    continue
+
+                                return [ full_response ], True
+                    else:
+                        raise RoomException("Unexpected response from OpenAI {response}".format(response=message))
+
+                    return [], False
+
+                if stream == False:
+                    room.developer.log_nowait(type="llm.message", data={ "context" : context.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "response" : response.to_dict() })
+
+                    context.create_response(response.id)
+
+                    final_outputs = []
+
+                    for message in response.output:
+                        outputs, done = await handle_message(message=message)
+                        if done:
+                            final_outputs.extend(outputs)
+                        else:
+                            for output in outputs:
+                                context.messages.append(output)
+
+                    if len(final_outputs) > 0:
+
+                        return final_outputs[0]
+
+                else:
+
+                    final_outputs = []
+                    all_outputs = []
+                    async for e in response:
+
+                        event : ResponseStreamEvent = e
+
+                        event_handler(event)
+
+                        if event.type == "response.completed":
+                            context.create_response(event.response.id)
+
+                            context.messages.extend(all_outputs)
+
+                            term = await self.check_for_termination(context=context, room=room)
+                            if term:
+                                text = ""
+                                for output in event.response.output:
+                                    if output.type == "message":
+                                        for content in output.content:
+                                            text += content.text
+
+                                return text
+
+
+                            all_outputs = []
+
+                        elif event.type == "response.output_item.done":
+
+                            context.previous_messages.append(event.item.to_dict())
+
+                            outputs, done = await handle_message(message=event.item)
+                            if done:
+                                final_outputs.extend(outputs)
+                            else:
+                                for output in outputs:
+                                    all_outputs.append(output)
+
+                    if len(final_outputs) > 0:
+
+                        return final_outputs[0]
+
+
+
+
+        except APIStatusError as e:
+            raise RoomException(f"Error from OpenAI: {e}")
+
+
+
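Taken together, the new Responses-based adapter is driven much like the completions adapter. A minimal usage sketch (the room connection, toolkit contents, and message shape are assumed, not taken from the package's own examples):

    adapter = OpenAIResponsesAdapter(model="gpt-4o")      # illustrative model
    context = adapter.create_chat_context()
    context.messages.append({"role": "user", "content": "Summarize the shared document."})

    def on_event(event):          # streaming callback; omit event_handler to disable streaming
        print(event.type)

    result = await adapter.next(
        context=context,
        room=room,                # an already-connected RoomClient
        toolkits=[my_toolkit],    # Toolkit instances supplying the tools
        output_schema=None,       # or a JSON schema to force structured output
        event_handler=on_event,
    )

With output_schema set, the adapter validates the model's JSON against the schema and re-prompts on validation errors; without it, next returns the assembled output text once no non-message items remain.
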
{meshagent_openai-0.0.3 → meshagent_openai-0.0.4/meshagent_openai.egg-info}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: meshagent-openai
-Version: 0.0.3
+Version: 0.0.4
 Summary: OpenAI Building Blocks for Meshagent
 Home-page:
 License: Apache License 2.0
@@ -14,9 +14,9 @@ Requires-Dist: pyjwt>=2.0.0
 Requires-Dist: pytest>=8.3.4
 Requires-Dist: pytest-asyncio>=0.24.0
 Requires-Dist: openai>=1.66.2
-Requires-Dist: meshagent-api>=0.0.3
-Requires-Dist: meshagent-agents>=0.0.3
-Requires-Dist: meshagent-tools>=0.0.3
+Requires-Dist: meshagent-api>=0.0.4
+Requires-Dist: meshagent-agents>=0.0.4
+Requires-Dist: meshagent-tools>=0.0.4
 Dynamic: description-content-type
 Dynamic: license
 Dynamic: project-url
{meshagent_openai-0.0.3 → meshagent_openai-0.0.4}/setup.py
RENAMED
@@ -29,9 +29,9 @@ setuptools.setup(
         "pytest>=8.3.4",
         "pytest-asyncio>=0.24.0",
         "openai>=1.66.2",
-        "meshagent-api>=0.0.3",
-        "meshagent-agents>=0.0.3",
-        "meshagent-tools>=0.0.3"
+        "meshagent-api>=0.0.4",
+        "meshagent-agents>=0.0.4",
+        "meshagent-tools>=0.0.4"
     ],
     package_data={
         "meshagent.openai": ["py.typed", "*.pyi", "**/*.pyi", "**/*.js"],
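This raises the floor on the companion meshagent packages to 0.0.4, so upgrading is a single install (assuming a standard PyPI install):

    pip install --upgrade meshagent-openai==0.0.4

which pulls meshagent-api, meshagent-agents, and meshagent-tools forward to at least 0.0.4.
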
meshagent_openai-0.0.4/version.py
ADDED
@@ -0,0 +1 @@
+__version__ = "0.0.4"
@@ -1,388 +0,0 @@
|
|
|
1
|
-
|
|
2
|
-
from meshagent.agents.agent import Agent, AgentChatContext, AgentCallContext
|
|
3
|
-
from meshagent.api import WebSocketClientProtocol, RoomClient, RoomException
|
|
4
|
-
from meshagent.tools.blob import Blob, BlobStorage
|
|
5
|
-
from meshagent.tools import Toolkit, ToolContext, Tool
|
|
6
|
-
from meshagent.api.messaging import Response, LinkResponse, FileResponse, JsonResponse, TextResponse, EmptyResponse
|
|
7
|
-
from meshagent.agents.schema import prompt_schema
|
|
8
|
-
from meshagent.agents.adapter import ToolResponseAdapter, LLMAdapter
|
|
9
|
-
from uuid import uuid4
|
|
10
|
-
import json
|
|
11
|
-
from jsonschema import validate
|
|
12
|
-
from typing import List, Dict
|
|
13
|
-
|
|
14
|
-
from openai import AsyncOpenAI, APIStatusError, NOT_GIVEN
|
|
15
|
-
from openai.types.chat import ChatCompletionMessageToolCall
|
|
16
|
-
from openai.types.responses import ResponseFunctionToolCall
|
|
17
|
-
|
|
18
|
-
from copy import deepcopy
|
|
19
|
-
from abc import abstractmethod, ABC
|
|
20
|
-
import os
|
|
21
|
-
import jsonschema
|
|
22
|
-
from typing import Optional, Any
|
|
23
|
-
|
|
24
|
-
import logging
|
|
25
|
-
import re
|
|
26
|
-
import asyncio
|
|
27
|
-
|
|
28
|
-
logging.basicConfig()
|
|
29
|
-
logger = logging.getLogger("openai_agent")
|
|
30
|
-
logger.setLevel(logging.INFO)
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
def _replace_non_matching(text: str, allowed_chars: str, replacement: str) -> str:
|
|
36
|
-
"""
|
|
37
|
-
Replaces every character in `text` that does not match the given
|
|
38
|
-
`allowed_chars` regex set with `replacement`.
|
|
39
|
-
|
|
40
|
-
Parameters:
|
|
41
|
-
-----------
|
|
42
|
-
text : str
|
|
43
|
-
The input string on which the replacement is to be done.
|
|
44
|
-
allowed_chars : str
|
|
45
|
-
A string defining the set of allowed characters (part of a character set).
|
|
46
|
-
For example, "a-zA-Z0-9" will keep only letters and digits.
|
|
47
|
-
replacement : str
|
|
48
|
-
The string to replace non-matching characters with.
|
|
49
|
-
|
|
50
|
-
Returns:
|
|
51
|
-
--------
|
|
52
|
-
str
|
|
53
|
-
A new string where all characters not in `allowed_chars` are replaced.
|
|
54
|
-
"""
|
|
55
|
-
# Build a regex that matches any character NOT in allowed_chars
|
|
56
|
-
pattern = rf"[^{allowed_chars}]"
|
|
57
|
-
return re.sub(pattern, replacement, text)
|
|
58
|
-
|
|
59
|
-
def safe_tool_name(name: str):
|
|
60
|
-
return _replace_non_matching(name, "a-zA-Z0-9_-", "_")
|
|
61
|
-
|
|
62
|
-
# Collects a group of tool proxies and manages execution of openai tool calls
|
|
63
|
-
class ResponsesToolBundle:
|
|
64
|
-
def __init__(self, toolkits: List[Toolkit]):
|
|
65
|
-
self._toolkits = toolkits
|
|
66
|
-
self._executors = dict[str, Toolkit]()
|
|
67
|
-
self._safe_names = {}
|
|
68
|
-
|
|
69
|
-
open_ai_tools = []
|
|
70
|
-
|
|
71
|
-
for toolkit in toolkits:
|
|
72
|
-
for v in toolkit.tools:
|
|
73
|
-
|
|
74
|
-
k = v.name
|
|
75
|
-
|
|
76
|
-
name = safe_tool_name(k)
|
|
77
|
-
|
|
78
|
-
if k in self._executors:
|
|
79
|
-
raise Exception(f"duplicate in bundle '{k}', tool names must be unique.")
|
|
80
|
-
|
|
81
|
-
self._executors[k] = toolkit
|
|
82
|
-
|
|
83
|
-
self._safe_names[name] = k
|
|
84
|
-
|
|
85
|
-
fn = {
|
|
86
|
-
"type" : "function",
|
|
87
|
-
"name" : name,
|
|
88
|
-
"description" : v.description,
|
|
89
|
-
"parameters" : {
|
|
90
|
-
**v.input_schema,
|
|
91
|
-
},
|
|
92
|
-
"strict": True,
|
|
93
|
-
}
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
if v.defs != None:
|
|
97
|
-
fn["parameters"]["$defs"] = v.defs
|
|
98
|
-
|
|
99
|
-
open_ai_tools.append(fn)
|
|
100
|
-
|
|
101
|
-
if len(open_ai_tools) == 0:
|
|
102
|
-
open_ai_tools = None
|
|
103
|
-
|
|
104
|
-
self._open_ai_tools = open_ai_tools
|
|
105
|
-
|
|
106
|
-
async def execute(self, *, context: ToolContext, tool_call: ResponseFunctionToolCall) -> Response:
|
|
107
|
-
try:
|
|
108
|
-
|
|
109
|
-
name = tool_call.name
|
|
110
|
-
arguments = json.loads(tool_call.arguments)
|
|
111
|
-
|
|
112
|
-
if name not in self._safe_names:
|
|
113
|
-
raise RoomException(f"Invalid tool name {name}, check the name of the tool")
|
|
114
|
-
|
|
115
|
-
name = self._safe_names[name]
|
|
116
|
-
|
|
117
|
-
if name not in self._executors:
|
|
118
|
-
raise Exception(f"Unregistered tool name {name}")
|
|
119
|
-
|
|
120
|
-
logger.info("executing %s %s %s", tool_call.id, name, arguments)
|
|
121
|
-
|
|
122
|
-
proxy = self._executors[name]
|
|
123
|
-
result = await proxy.execute(context=context, name=name, arguments=arguments)
|
|
124
|
-
logger.info("success calling %s %s %s", tool_call.id, name, result)
|
|
125
|
-
return result
|
|
126
|
-
|
|
127
|
-
except Exception as e:
|
|
128
|
-
logger.error("failed calling %s %s", tool_call.id, name, exc_info=e)
|
|
129
|
-
raise
|
|
130
|
-
|
|
131
|
-
def contains(self, name: str) -> bool:
|
|
132
|
-
return name in self._open_ai_tools
|
|
133
|
-
|
|
134
|
-
def to_json(self) -> List[dict] | None:
|
|
135
|
-
if self._open_ai_tools == None:
|
|
136
|
-
return None
|
|
137
|
-
return self._open_ai_tools.copy()
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
# Converts a tool response into a series of messages that can be inserted into the openai context
|
|
141
|
-
class OpenAIResponsesToolResponseAdapter(ToolResponseAdapter):
|
|
142
|
-
def __init__(self, blob_storage: Optional[BlobStorage] = None):
|
|
143
|
-
self._blob_storage = blob_storage
|
|
144
|
-
pass
|
|
145
|
-
|
|
146
|
-
async def to_plain_text(self, *, room: RoomClient, response: Response) -> str:
|
|
147
|
-
if isinstance(response, LinkResponse):
|
|
148
|
-
return json.dumps({
|
|
149
|
-
"name" : response.name,
|
|
150
|
-
"url" : response.url,
|
|
151
|
-
})
|
|
152
|
-
|
|
153
|
-
elif isinstance(response, JsonResponse):
|
|
154
|
-
return json.dumps(response.json)
|
|
155
|
-
|
|
156
|
-
elif isinstance(response, TextResponse):
|
|
157
|
-
return response.text
|
|
158
|
-
|
|
159
|
-
elif isinstance(response, FileResponse):
|
|
160
|
-
|
|
161
|
-
blob = Blob(mime_type=response.mime_type, data=response.data)
|
|
162
|
-
uri = self._blob_storage.store(blob=blob)
|
|
163
|
-
|
|
164
|
-
return f"The results have been written to a blob with the uri {uri} with the mime type {blob.mime_type}."
|
|
165
|
-
|
|
166
|
-
elif isinstance(response, EmptyResponse):
|
|
167
|
-
return "ok"
|
|
168
|
-
|
|
169
|
-
#elif isinstance(response, ImageResponse):
|
|
170
|
-
# context.messages.append({
|
|
171
|
-
# "role" : "assistant",
|
|
172
|
-
# "content" : "the user will upload the image",
|
|
173
|
-
# "tool_call_id" : tool_call.id,
|
|
174
|
-
# })
|
|
175
|
-
# context.messages.append({
|
|
176
|
-
# "role" : "user",
|
|
177
|
-
# "content" : [
|
|
178
|
-
# { "type" : "text", "text": "this is the image from tool call id {tool_call.id}" },
|
|
179
|
-
# { "type" : "image_url", "image_url": {"url": response.url, "detail": "auto"} }
|
|
180
|
-
# ]
|
|
181
|
-
# })
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
elif isinstance(response, dict):
|
|
185
|
-
return json.dumps(response)
|
|
186
|
-
|
|
187
|
-
elif isinstance(response, str):
|
|
188
|
-
return response
|
|
189
|
-
|
|
190
|
-
elif response == None:
|
|
191
|
-
return "ok"
|
|
192
|
-
|
|
193
|
-
else:
|
|
194
|
-
raise Exception("unexpected return type: {type}".format(type=type(response)))
|
|
195
|
-
|
|
196
|
-
async def append_messages(self, *, context: AgentChatContext, tool_call: ResponseFunctionToolCall, room: RoomClient, response: Response) -> list:
|
|
197
|
-
|
|
198
|
-
message = {
|
|
199
|
-
"output" : await self.to_plain_text(room=room, response=response),
|
|
200
|
-
"call_id" : tool_call.call_id,
|
|
201
|
-
"type" : "function_call_output"
|
|
202
|
-
}
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
room.developer.log_nowait(type="llm.message", data={ "context" : context.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "message" : message })
|
|
206
|
-
|
|
207
|
-
context.messages.append(message)
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
class OpenAIResponsesAdapter(LLMAdapter):
|
|
213
|
-
def __init__(self,
|
|
214
|
-
model: str = os.getenv("OPENAI_MODEL"),
|
|
215
|
-
parallel_tool_calls : Optional[bool] = None,
|
|
216
|
-
client: Optional[AsyncOpenAI] = None,
|
|
217
|
-
reasoning_effort: Optional[str] = None,
|
|
218
|
-
):
|
|
219
|
-
self._model = model
|
|
220
|
-
self._parallel_tool_calls = parallel_tool_calls
|
|
221
|
-
self._client = client
|
|
222
|
-
self._reasoning_effort = reasoning_effort
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
def create_chat_context(self):
|
|
226
|
-
system_role = "system"
|
|
227
|
-
if self._model.startswith("o1"):
|
|
228
|
-
system_role = "developer"
|
|
229
|
-
elif self._model.startswith("o3"):
|
|
230
|
-
system_role = "developer"
|
|
231
|
-
|
|
232
|
-
context = AgentChatContext(
|
|
233
|
-
system_role=system_role
|
|
234
|
-
)
|
|
235
|
-
|
|
236
|
-
return context
|
|
237
|
-
|
|
238
|
-
# Takes the current chat context, executes a completion request and processes the response.
|
|
239
|
-
# If a tool calls are requested, invokes the tools, processes the tool calls results, and appends the tool call results to the context
|
|
240
|
-
async def next(self,
|
|
241
|
-
*,
|
|
242
|
-
context: AgentChatContext,
|
|
243
|
-
room: RoomClient,
|
|
244
|
-
toolkits: Toolkit,
|
|
245
|
-
tool_adapter: Optional[ToolResponseAdapter] = None,
|
|
246
|
-
output_schema: Optional[dict] = None,
|
|
247
|
-
):
|
|
248
|
-
if tool_adapter == None:
|
|
249
|
-
tool_adapter = OpenAIResponsesToolResponseAdapter()
|
|
250
|
-
|
|
251
|
-
try:
|
|
252
|
-
if self._client != None:
|
|
253
|
-
openai = self._client
|
|
254
|
-
else:
|
|
255
|
-
|
|
256
|
-
|
|
257
|
-
token : str = room.protocol.token
|
|
258
|
-
url : str = room.room_url
|
|
259
|
-
|
|
260
|
-
room_proxy_url = f"{url}/v1"
|
|
261
|
-
|
|
262
|
-
openai=AsyncOpenAI(
|
|
263
|
-
api_key=token,
|
|
264
|
-
base_url=room_proxy_url,
|
|
265
|
-
default_headers={
|
|
266
|
-
"Meshagent-Session" : room.session_id
|
|
267
|
-
}
|
|
268
|
-
)
|
|
269
|
-
|
|
270
|
-
tool_bundle = ResponsesToolBundle(toolkits=[
|
|
271
|
-
*toolkits,
|
|
272
|
-
])
|
|
273
|
-
open_ai_tools = tool_bundle.to_json()
|
|
274
|
-
|
|
275
|
-
if open_ai_tools != None:
|
|
276
|
-
logger.info("OpenAI Tools: %s", json.dumps(open_ai_tools))
|
|
277
|
-
else:
|
|
278
|
-
logger.info("OpenAI Tools: Empty")
|
|
279
|
-
|
|
280
|
-
response_schema = output_schema
|
|
281
|
-
response_name = "response"
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
while context.messages[-1].get("role") != "assistant" if context.messages else True:
|
|
285
|
-
logger.info("model: %s, context: %s, output_schema: %s", self._model, context.messages, output_schema)
|
|
286
|
-
ptc = self._parallel_tool_calls
|
|
287
|
-
extra = {}
|
|
288
|
-
if ptc != None and self._model.startswith("o") == False:
|
|
289
|
-
extra["parallel_tool_calls"] = ptc
|
|
290
|
-
|
|
291
|
-
text = NOT_GIVEN
|
|
292
|
-
if output_schema != None:
|
|
293
|
-
text = {
|
|
294
|
-
"format" : {
|
|
295
|
-
"type" : "json_schema",
|
|
296
|
-
"name" : response_name,
|
|
297
|
-
"schema" : response_schema,
|
|
298
|
-
"strict" : True,
|
|
299
|
-
}
|
|
300
|
-
}
|
|
301
|
-
|
|
302
|
-
reasoning = NOT_GIVEN
|
|
303
|
-
if self._reasoning_effort != None:
|
|
304
|
-
reasoning = {
|
|
305
|
-
"effort" : self._reasoning_effort
|
|
306
|
-
}
|
|
307
|
-
|
|
308
|
-
response = await openai.responses.create(
|
|
309
|
-
model = self._model,
|
|
310
|
-
input = context.messages,
|
|
311
|
-
tools = open_ai_tools,
|
|
312
|
-
text = text,
|
|
313
|
-
reasoning=reasoning,
|
|
314
|
-
)
|
|
315
|
-
|
|
316
|
-
room.developer.log_nowait(type="llm.message", data={ "context" : context.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "response" : response.to_dict() })
|
|
317
|
-
|
|
318
|
-
for message in response.output:
|
|
319
|
-
context.messages.append(message)
|
|
320
|
-
|
|
321
|
-
if message.type == "function_call":
|
|
322
|
-
|
|
323
|
-
tasks = []
|
|
324
|
-
|
|
325
|
-
async def do_tool_call(tool_call: ResponseFunctionToolCall):
|
|
326
|
-
try:
|
|
327
|
-
tool_context = ToolContext(
|
|
328
|
-
room=room,
|
|
329
|
-
caller=room.local_participant,
|
|
330
|
-
)
|
|
331
|
-
tool_response = await tool_bundle.execute(context=tool_context, tool_call=tool_call)
|
|
332
|
-
logger.info(f"tool response {tool_response}")
|
|
333
|
-
await tool_adapter.append_messages(context=context, tool_call=tool_call, room=room, response=tool_response)
|
|
334
|
-
except Exception as e:
|
|
335
|
-
logger.error(f"unable to complete tool call {tool_call}", exc_info=e)
|
|
336
|
-
room.developer.log_nowait(type="llm.error", data={ "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "error" : f"{e}" })
|
|
337
|
-
|
|
338
|
-
return {
|
|
339
|
-
"output" : json.dumps({"error":f"unable to complete tool call: {e}"}),
|
|
340
|
-
"call_id" : tool_call.call_id,
|
|
341
|
-
"type" : "function_call_output"
|
|
342
|
-
}
|
|
343
|
-
|
|
344
|
-
|
|
345
|
-
tasks.append(asyncio.create_task(do_tool_call(message)))
|
|
346
|
-
|
|
347
|
-
results = await asyncio.gather(*tasks)
|
|
348
|
-
|
|
349
|
-
for result in results:
|
|
350
|
-
if result != None:
|
|
351
|
-
room.developer.log_nowait(type="llm.message", data={ "context" : context.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "message" : result })
|
|
352
|
-
context.messages.append(result)
|
|
353
|
-
|
|
354
|
-
elif response.output_text != None:
|
|
355
|
-
|
|
356
|
-
content = response.output_text
|
|
357
|
-
|
|
358
|
-
logger.info("RESPONSE FROM OPENAI %s", content)
|
|
359
|
-
if response_schema == None:
|
|
360
|
-
return content
|
|
361
|
-
|
|
362
|
-
# First try to parse the result
|
|
363
|
-
try:
|
|
364
|
-
full_response = json.loads(content)
|
|
365
|
-
# sometimes open ai packs two JSON chunks seperated by newline, check if that's why we couldn't parse
|
|
366
|
-
except json.decoder.JSONDecodeError as e:
|
|
367
|
-
for part in content.splitlines():
|
|
368
|
-
if len(part.strip()) > 0:
|
|
369
|
-
full_response = json.loads(part)
|
|
370
|
-
|
|
371
|
-
try:
|
|
372
|
-
self.validate(response=full_response, output_schema=response_schema)
|
|
373
|
-
except Exception as e:
|
|
374
|
-
logger.error("recieved invalid response, retrying", exc_info=e)
|
|
375
|
-
error = { "role" : "user", "content" : "encountered a validation error with the output: {error}".format(error=e)}
|
|
376
|
-
room.developer.log_nowait(type="llm.message", data={ "context" : context.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "message" : error })
|
|
377
|
-
context.messages.append(error)
|
|
378
|
-
continue
|
|
379
|
-
|
|
380
|
-
return full_response
|
|
381
|
-
else:
|
|
382
|
-
raise RoomException("Unexpected response from OpenAI {response}".format(response=message))
|
|
383
|
-
except APIStatusError as e:
|
|
384
|
-
raise RoomException(f"Error from OpenAI: {e}")
|
|
385
|
-
|
|
386
|
-
|
|
387
|
-
|
|
388
|
-
|
|
meshagent_openai-0.0.3/version.py
DELETED
@@ -1 +0,0 @@
-__version__ = "0.0.3"
{meshagent_openai-0.0.3 → meshagent_openai-0.0.4}/LICENSE
RENAMED - File without changes
{meshagent_openai-0.0.3 → meshagent_openai-0.0.4}/MANIFEST.in
RENAMED - File without changes
{meshagent_openai-0.0.3 → meshagent_openai-0.0.4}/README.md
RENAMED - File without changes
{meshagent_openai-0.0.3 → meshagent_openai-0.0.4}/meshagent/openai/__init__.py
RENAMED - File without changes
{meshagent_openai-0.0.3 → meshagent_openai-0.0.4}/meshagent/openai/tools/__init__.py
RENAMED - File without changes
{meshagent_openai-0.0.3 → meshagent_openai-0.0.4}/meshagent_openai.egg-info/SOURCES.txt
RENAMED - File without changes
{meshagent_openai-0.0.3 → meshagent_openai-0.0.4}/meshagent_openai.egg-info/dependency_links.txt
RENAMED - File without changes
{meshagent_openai-0.0.3 → meshagent_openai-0.0.4}/meshagent_openai.egg-info/top_level.txt
RENAMED - File without changes
{meshagent_openai-0.0.3 → meshagent_openai-0.0.4}/pyproject.toml
RENAMED - File without changes
{meshagent_openai-0.0.3 → meshagent_openai-0.0.4}/setup.cfg
RENAMED - File without changes