meshagent-openai 0.0.37__py3-none-any.whl → 0.0.39__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of meshagent-openai might be problematic.
- meshagent/openai/__init__.py +16 -2
- meshagent/openai/proxy/__init__.py +2 -0
- meshagent/openai/proxy/proxy.py +7 -10
- meshagent/openai/tools/__init__.py +17 -2
- meshagent/openai/tools/completions_adapter.py +226 -139
- meshagent/openai/tools/responses_adapter.py +1028 -500
- meshagent/openai/tools/schema.py +100 -49
- meshagent/openai/tools/stt.py +67 -38
- meshagent/openai/tools/stt_test.py +6 -4
- meshagent/openai/version.py +1 -1
- meshagent_openai-0.0.39.dist-info/METADATA +50 -0
- meshagent_openai-0.0.39.dist-info/RECORD +15 -0
- meshagent_openai-0.0.37.dist-info/METADATA +0 -21
- meshagent_openai-0.0.37.dist-info/RECORD +0 -15
- {meshagent_openai-0.0.37.dist-info → meshagent_openai-0.0.39.dist-info}/WHEEL +0 -0
- {meshagent_openai-0.0.37.dist-info → meshagent_openai-0.0.39.dist-info}/licenses/LICENSE +0 -0
- {meshagent_openai-0.0.37.dist-info → meshagent_openai-0.0.39.dist-info}/top_level.txt +0 -0
meshagent/openai/__init__.py
CHANGED
@@ -1,2 +1,16 @@
-from .tools import …
-…
+from .tools import (
+    OpenAICompletionsAdapter,
+    OpenAIResponsesAdapter,
+    OpenAICompletionsToolResponseAdapter,
+    OpenAIResponsesToolResponseAdapter,
+)
+from .version import __version__
+
+
+__all__ = [
+    __version__,
+    OpenAICompletionsAdapter,
+    OpenAIResponsesAdapter,
+    OpenAICompletionsToolResponseAdapter,
+    OpenAIResponsesToolResponseAdapter,
+]
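For reference, the package root in 0.0.39 re-exports the adapters directly. A minimal consumer sketch, assuming meshagent-openai 0.0.39 is installed (the model name is illustrative, not pinned by the package):

from meshagent.openai import OpenAICompletionsAdapter, __version__

# The version string is re-exported alongside the adapters.
print(__version__)  # "0.0.39"

# Construction is unchanged from 0.0.37; only the import path is shorter.
adapter = OpenAICompletionsAdapter(model="gpt-4o")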
meshagent/openai/proxy/proxy.py
CHANGED
@@ -1,26 +1,23 @@
-import os
 from meshagent.api import RoomClient
 from openai import AsyncOpenAI
 
+
 def get_client(*, room: RoomClient) -> AsyncOpenAI:
+    token: str = room.protocol.token
 
-    token : str = room.protocol.token
-
     # when running inside the room pod, the room.room_url currently points to the external url
     # so we need to use url off the protocol (if available).
     # TODO: room_url should be set properly, but may need a claim in the token to be set during call to say it is local
-    url …
-
+    url: str = getattr(room.protocol, "url", room.room_url)
+
     room_proxy_url = f"{url}/v1"
 
     if room_proxy_url.startswith("ws:") or room_proxy_url.startswith("wss:"):
-        room_proxy_url = room_proxy_url.replace("ws","http",1)
+        room_proxy_url = room_proxy_url.replace("ws", "http", 1)
 
-    openai=AsyncOpenAI(
+    openai = AsyncOpenAI(
         api_key=token,
         base_url=room_proxy_url,
-        default_headers={
-            "Meshagent-Session" : room.session_id
-        }
+        default_headers={"Meshagent-Session": room.session_id},
     )
    return openai
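A sketch of how the rewritten helper is typically used, assuming get_client is re-exported from meshagent.openai.proxy (its __init__.py gained two lines in this release) and that a connected RoomClient already exists; the model name is illustrative:

from meshagent.api import RoomClient
from meshagent.openai.proxy import get_client

async def complete(room: RoomClient) -> str:
    # get_client points the OpenAI SDK at the room's /v1 proxy and
    # authenticates with the room token, as shown in the diff above.
    openai = get_client(room=room)
    result = await openai.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "hello"}],
    )
    return result.choices[0].message.content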
meshagent/openai/tools/__init__.py
CHANGED
@@ -1,3 +1,18 @@
-from .responses_adapter import …
-…
+from .responses_adapter import (
+    OpenAIResponsesAdapter,
+    OpenAIResponsesToolResponseAdapter,
+)
+from .completions_adapter import (
+    OpenAICompletionsAdapter,
+    OpenAICompletionsToolResponseAdapter,
+)
 from .stt import OpenAIAudioFileSTT, OpenAISTTToolkit
+
+__all__ = [
+    OpenAIResponsesAdapter,
+    OpenAIResponsesToolResponseAdapter,
+    OpenAICompletionsAdapter,
+    OpenAICompletionsToolResponseAdapter,
+    OpenAIAudioFileSTT,
+    OpenAISTTToolkit,
+]
meshagent/openai/tools/completions_adapter.py
CHANGED
@@ -1,15 +1,21 @@
-
 from meshagent.agents.agent import AgentChatContext
 from meshagent.api import RoomClient, RoomException
 from meshagent.tools.blob import Blob, BlobStorage
 from meshagent.tools import Toolkit, ToolContext
-from meshagent.api.messaging import …
+from meshagent.api.messaging import (
+    Response,
+    LinkResponse,
+    FileResponse,
+    JsonResponse,
+    TextResponse,
+    EmptyResponse,
+)
 from meshagent.agents.adapter import ToolResponseAdapter, LLMAdapter
 import json
 from typing import List
 
 from openai import AsyncOpenAI, APIStatusError
-from openai.types.chat import ChatCompletion, …
+from openai.types.chat import ChatCompletion, ChatCompletionMessageToolCall
 
 import os
 from typing import Optional, Any
@@ -27,7 +33,7 @@ def _replace_non_matching(text: str, allowed_chars: str, replacement: str) -> str
     """
     Replaces every character in `text` that does not match the given
     `allowed_chars` regex set with `replacement`.
-
+
     Parameters:
     -----------
     text : str
@@ -37,7 +43,7 @@ def _replace_non_matching(text: str, allowed_chars: str, replacement: str) -> str
         For example, "a-zA-Z0-9" will keep only letters and digits.
     replacement : str
         The string to replace non-matching characters with.
-
+
     Returns:
     --------
     str
@@ -47,9 +53,11 @@ def _replace_non_matching(text: str, allowed_chars: str, replacement: str) -> str
     pattern = rf"[^{allowed_chars}]"
     return re.sub(pattern, replacement, text)
 
+
 def safe_tool_name(name: str):
     return _replace_non_matching(name, "a-zA-Z0-9_-", "_")
 
+
 # Collects a group of tool proxies and manages execution of openai tool calls
 class CompletionsToolBundle:
     def __init__(self, toolkits: List[Toolkit]):
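For reference, the sanitizer keeps only characters in [a-zA-Z0-9_-] and maps everything else to an underscore, keeping tool names inside OpenAI's allowed character set; a quick sketch of its behavior as defined above:

assert safe_tool_name("search.documents") == "search_documents"
assert safe_tool_name("ask user") == "ask_user"
assert safe_tool_name("already-safe_name1") == "already-safe_name1"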
@@ -58,40 +66,38 @@ class CompletionsToolBundle:
         self._safe_names = {}
 
         open_ai_tools = []
-
-        for toolkit in toolkits:
-            for v in toolkit.tools:
 
+        for toolkit in toolkits:
+            for v in toolkit.tools:
                 k = v.name
 
                 name = safe_tool_name(k)
 
                 if k in self._executors:
-                    raise Exception(…
+                    raise Exception(
+                        f"duplicate in bundle '{k}', tool names must be unique."
+                    )
 
                 self._executors[k] = toolkit
 
                 self._safe_names[name] = k
 
                 fn = {
-                    "name" …
-                    "parameters" …
+                    "name": name,
+                    "parameters": {
                         **v.input_schema,
                     },
                     "strict": True,
                 }
 
-
-                if v.defs != None:
+                if v.defs is not None:
                     fn["parameters"]["$defs"] = v.defs
-
 
                 schema = {
-                    "type" …
-                    "function" …
+                    "type": "function",
+                    "function": fn,
                 }
 
-
                 open_ai_tools.append(schema)
 
         if len(open_ai_tools) == 0:
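The loop above emits one function-tool entry per registered tool. For a hypothetical tool named "lookup" with a single required string argument, the appended entry would look like this (the inner parameter schema comes from the tool's input_schema, so those fields are illustrative):

schema = {
    "type": "function",
    "function": {
        "name": "lookup",
        "parameters": {
            "type": "object",
            "properties": {"query": {"type": "string"}},
            "required": ["query"],
            "additionalProperties": False,
        },
        "strict": True,
    },
}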
@@ -99,16 +105,19 @@ class CompletionsToolBundle:
 
         self._open_ai_tools = open_ai_tools
 
-    async def execute(…
+    async def execute(
+        self, *, context: ToolContext, tool_call: ChatCompletionMessageToolCall
+    ) -> Response:
         try:
-
             function = tool_call.function
             name = function.name
             arguments = json.loads(function.arguments)
 
             if name not in self._safe_names:
-                raise RoomException(…
-…
+                raise RoomException(
+                    f"Invalid tool name {name}, check the name of the tool"
+                )
+
             name = self._safe_names[name]
 
             if name not in self._executors:
@@ -117,8 +126,10 @@ class CompletionsToolBundle:
             logger.info("executing %s %s %s", tool_call.id, name, arguments)
 
             proxy = self._executors[name]
-            result = await proxy.execute(…
-…
+            result = await proxy.execute(
+                context=context, name=name, arguments=arguments
+            )
+            logger.info("success calling %s %s %s", tool_call.id, name, result)
             return result
 
         except Exception as e:
@@ -129,10 +140,10 @@ class CompletionsToolBundle:
         return name in self._open_ai_tools
 
     def to_json(self) -> List[dict] | None:
-        if self._open_ai_tools …
+        if self._open_ai_tools is None:
            return None
        return self._open_ai_tools.copy()
-
+
 
 # Converts a tool response into a series of messages that can be inserted into the openai context
 class OpenAICompletionsToolResponseAdapter(ToolResponseAdapter):
@@ -141,80 +152,94 @@ class OpenAICompletionsToolResponseAdapter(ToolResponseAdapter):
         pass
 
     async def to_plain_text(self, *, room: RoomClient, response: Response) -> str:
-        if isinstance(response, LinkResponse):
-…
-…
-…
-…
-…
-…
+        if isinstance(response, LinkResponse):
+            return json.dumps(
+                {
+                    "name": response.name,
+                    "url": response.url,
+                }
+            )
+
+        elif isinstance(response, JsonResponse):
             return json.dumps(response.json)
-
+
         elif isinstance(response, TextResponse):
             return response.text
-
-        elif isinstance(response, FileResponse):
 
+        elif isinstance(response, FileResponse):
             blob = Blob(mime_type=response.mime_type, data=response.data)
             uri = self._blob_storage.store(blob=blob)
-
+
             return f"The results have been written to a blob with the uri {uri} with the mime type {blob.mime_type}."
-
+
         elif isinstance(response, EmptyResponse):
             return "ok"
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-        elif isinstance(response, dict):
+
+        # elif isinstance(response, ImageResponse):
+        #    context.messages.append({
+        #        "role" : "tool",
+        #        "content" : "the user will upload the image",
+        #        "tool_call_id" : tool_call.id,
+        #    })
+        #    context.messages.append({
+        #        "role" : "user",
+        #        "content" : [
+        #            { "type" : "text", "text": "this is the image from tool call id {tool_call.id}" },
+        #            { "type" : "image_url", "image_url": {"url": response.url, "detail": "auto"} }
+        #        ]
+        #    })
+
+        elif isinstance(response, dict):
             return json.dumps(response)
-
-        elif isinstance(response, str):
+
+        elif isinstance(response, str):
             return response
 
-        elif response …
+        elif response is None:
             return "ok"
-
-        else:
-            raise Exception("unexpected return type: {type}".format(type=type(response)))
 
-
+        else:
+            raise Exception(
+                "unexpected return type: {type}".format(type=type(response))
+            )
 
+    async def create_messages(
+        self,
+        *,
+        context: AgentChatContext,
+        tool_call: Any,
+        room: RoomClient,
+        response: Response,
+    ) -> list:
         message = {
-            "role" …
-            "content" …
-            "tool_call_id" …
-        }
-
-        room.developer.log_nowait(
-…
-…
-…
+            "role": "tool",
+            "content": await self.to_plain_text(room=room, response=response),
+            "tool_call_id": tool_call.id,
+        }
+
+        room.developer.log_nowait(
+            type="llm.message",
+            data={
+                "context": context.id,
+                "participant_id": room.local_participant.id,
+                "participant_name": room.local_participant.get_attribute("name"),
+                "message": message,
+            },
+        )
 
+        return [message]
 
 
 class OpenAICompletionsAdapter(LLMAdapter):
-    def __init__(…
+    def __init__(
+        self,
         model: str = os.getenv("OPENAI_MODEL"),
-        parallel_tool_calls …
+        parallel_tool_calls: Optional[bool] = None,
         client: Optional[AsyncOpenAI] = None,
     ):
         self._model = model
         self._parallel_tool_calls = parallel_tool_calls
         self._client = client
-
 
     def create_chat_context(self):
         system_role = "system"
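The adapter reduces every tool response to a single tool-role message. For a text response of "42" produced by a call with id "call_abc123" (both values illustrative), create_messages would return:

[
    {
        "role": "tool",
        "content": "42",
        "tool_call_id": "call_abc123",
    }
]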
@@ -225,16 +250,14 @@ class OpenAICompletionsAdapter(LLMAdapter):
         elif self._model.startswith("o4"):
             system_role = "developer"
 
-        context = AgentChatContext(
-            system_role=system_role
-        )
+        context = AgentChatContext(system_role=system_role)
 
         return context
 
-
     # Takes the current chat context, executes a completion request and processes the response.
     # If a tool calls are requested, invokes the tools, processes the tool calls results, and appends the tool call results to the context
-    async def next(…
+    async def next(
+        self,
         *,
         context: AgentChatContext,
         room: RoomClient,
@@ -242,57 +265,71 @@ class OpenAICompletionsAdapter(LLMAdapter):
         tool_adapter: Optional[ToolResponseAdapter] = None,
         output_schema: Optional[dict] = None,
     ):
-        if tool_adapter …
+        if tool_adapter is None:
             tool_adapter = OpenAICompletionsToolResponseAdapter()
 
         try:
             openai = get_client(room=room)
 
-            tool_bundle = CompletionsToolBundle(…
-…
-…
+            tool_bundle = CompletionsToolBundle(
+                toolkits=[
+                    *toolkits,
+                ]
+            )
             open_ai_tools = tool_bundle.to_json()
 
-            if open_ai_tools …
+            if open_ai_tools is not None:
                 logger.info("OpenAI Tools: %s", json.dumps(open_ai_tools))
             else:
                 logger.info("OpenAI Tools: Empty")
-
+
             response_schema = output_schema
             response_name = "response"
-…
-…
-…
-…
+
+            while True:
+                logger.info(
+                    "model: %s, context: %s, output_schema: %s",
+                    self._model,
+                    context.messages,
+                    output_schema,
+                )
                 ptc = self._parallel_tool_calls
                 extra = {}
-                if ptc …
-                    extra["parallel_tool_calls"] = ptc
+                if ptc is not None and not self._model.startswith("o"):
+                    extra["parallel_tool_calls"] = ptc
 
-                if output_schema …
+                if output_schema is not None:
                     extra["response_format"] = {
-                        "type" …
+                        "type": "json_schema",
                         "json_schema": {
-                            "name" …
-                            "schema" …
-                            "strict" …
-                        }
+                            "name": response_name,
+                            "schema": response_schema,
+                            "strict": True,
+                        },
                     }
 
-                response …
+                response: ChatCompletion = await openai.chat.completions.create(
                     n=1,
-                    model …
-                    messages …
-                    tools …
-                    **extra
+                    model=self._model,
+                    messages=context.messages,
+                    tools=open_ai_tools,
+                    **extra,
                 )
                 message = response.choices[0].message
-                room.developer.log_nowait(…
+                room.developer.log_nowait(
+                    type="llm.message",
+                    data={
+                        "context": context.id,
+                        "participant_id": room.local_participant.id,
+                        "participant_name": room.local_participant.get_attribute(
+                            "name"
+                        ),
+                        "message": message.to_dict(),
+                    },
+                )
                 context.messages.append(message)
-
-                if message.tool_calls …
-…
-…
+
+                if message.tool_calls is not None:
                     tasks = []
 
                     async def do_tool_call(tool_call: ChatCompletionMessageToolCall):
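When an output_schema is supplied, the request gains a response_format block assembled exactly as above; with a hypothetical one-field schema it would be:

extra = {
    "response_format": {
        "type": "json_schema",
        "json_schema": {
            "name": "response",
            "schema": {
                "type": "object",
                "properties": {"answer": {"type": "string"}},
                "required": ["answer"],
                "additionalProperties": False,
            },
            "strict": True,
        },
    }
}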
@@ -300,22 +337,43 @@ class OpenAICompletionsAdapter(LLMAdapter):
                         tool_context = ToolContext(
                             room=room,
                             caller=room.local_participant,
-                            caller_context={…
+                            caller_context={"chat": context.to_json},
+                        )
+                        tool_response = await tool_bundle.execute(
+                            context=tool_context, tool_call=tool_call
                         )
-                        tool_response = await tool_bundle.execute(context=tool_context, tool_call=tool_call)
                         logger.info(f"tool response {tool_response}")
-                        return await tool_adapter.create_messages(…
-…
+                        return await tool_adapter.create_messages(
+                            context=context,
+                            tool_call=tool_call,
+                            room=room,
+                            response=tool_response,
+                        )
+
                     except Exception as e:
-                        logger.error(…
-…
-…
-…
-                        "…
-…
-…
-…
+                        logger.error(
+                            f"unable to complete tool call {tool_call}", exc_info=e
+                        )
+                        room.developer.log_nowait(
+                            type="llm.error",
+                            data={
+                                "participant_id": room.local_participant.id,
+                                "participant_name": room.local_participant.get_attribute(
+                                    "name"
+                                ),
+                                "error": f"{e}",
+                            },
+                        )
 
+                        return [
+                            {
+                                "role": "tool",
+                                "content": json.dumps(
+                                    {"error": f"unable to complete tool call: {e}"}
+                                ),
+                                "tool_call_id": tool_call.id,
+                            }
+                        ]
 
                     for tool_call in message.tool_calls:
                         tasks.append(asyncio.create_task(do_tool_call(tool_call)))
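On failure, do_tool_call now resolves to a synthetic tool message instead of propagating the exception, so the gather below still receives a result for every call; with an illustrative call id the fallback value looks like:

[
    {
        "role": "tool",
        "content": "{\"error\": \"unable to complete tool call: ...\"}",
        "tool_call_id": "call_abc123",
    }
]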
@@ -323,43 +381,72 @@ class OpenAICompletionsAdapter(LLMAdapter):
                     results = await asyncio.gather(*tasks)
 
                     for result in results:
-                        if result …
-                            room.developer.log_nowait(…
+                        if result is not None:
+                            room.developer.log_nowait(
+                                type="llm.message",
+                                data={
+                                    "context": context.id,
+                                    "participant_id": room.local_participant.id,
+                                    "participant_name": room.local_participant.get_attribute(
+                                        "name"
+                                    ),
+                                    "message": result,
+                                },
+                            )
                             context.messages.append(result)
-
-
-
-                elif message.content != None:
+
+                elif message.content is not None:
                     content = message.content
-
+
                     logger.info("RESPONSE FROM OPENAI %s", content)
-                    if response_schema …
+                    if response_schema is None:
                         return content
-
+
                     # First try to parse the result
                     try:
                         full_response = json.loads(content)
                     # sometimes open ai packs two JSON chunks seperated by newline, check if that's why we couldn't parse
-                    except json.decoder.JSONDecodeError …
+                    except json.decoder.JSONDecodeError:
                         for part in content.splitlines():
                             if len(part.strip()) > 0:
                                 full_response = json.loads(part)
-
+
                     try:
-                        self.validate(…
+                        self.validate(
+                            response=full_response,
+                            output_schema=response_schema,
+                        )
                     except Exception as e:
-                        logger.error(…
-…
-…
+                        logger.error(
+                            "recieved invalid response, retrying",
+                            exc_info=e,
+                        )
+                        error = {
+                            "role": "user",
+                            "content": "encountered a validation error with the output: {error}".format(
+                                error=e
+                            ),
+                        }
+                        room.developer.log_nowait(
+                            type="llm.message",
+                            data={
+                                "context": context.id,
+                                "participant_id": room.local_participant.id,
+                                "participant_name": room.local_participant.get_attribute(
+                                    "name"
+                                ),
+                                "message": error,
+                            },
+                        )
                         context.messages.append(error)
                         continue
-
+
                     return full_response
                 else:
-                    raise RoomException(…
+                    raise RoomException(
+                        "Unexpected response from OpenAI {response}".format(
+                            response=message
+                        )
+                    )
         except APIStatusError as e:
             raise RoomException(f"Error from OpenAI: {e}")
-
-
-
-