meshagent-openai 0.18.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- meshagent/openai/__init__.py +16 -0
- meshagent/openai/proxy/__init__.py +3 -0
- meshagent/openai/proxy/proxy.py +79 -0
- meshagent/openai/tools/__init__.py +18 -0
- meshagent/openai/tools/apply_patch.py +344 -0
- meshagent/openai/tools/completions_adapter.py +437 -0
- meshagent/openai/tools/responses_adapter.py +2369 -0
- meshagent/openai/tools/schema.py +253 -0
- meshagent/openai/tools/stt.py +118 -0
- meshagent/openai/tools/stt_test.py +87 -0
- meshagent/openai/version.py +1 -0
- meshagent_openai-0.18.0.dist-info/METADATA +50 -0
- meshagent_openai-0.18.0.dist-info/RECORD +16 -0
- meshagent_openai-0.18.0.dist-info/WHEEL +5 -0
- meshagent_openai-0.18.0.dist-info/licenses/LICENSE +201 -0
- meshagent_openai-0.18.0.dist-info/top_level.txt +1 -0

meshagent/openai/tools/completions_adapter.py
@@ -0,0 +1,437 @@
from meshagent.agents.agent import AgentChatContext
from meshagent.api import RoomClient, RoomException, RemoteParticipant
from meshagent.tools import Toolkit, ToolContext
from meshagent.api.messaging import (
    Response,
    LinkResponse,
    FileResponse,
    JsonResponse,
    TextResponse,
    EmptyResponse,
)
from meshagent.agents.adapter import ToolResponseAdapter, LLMAdapter
import json
from typing import List

from openai import AsyncOpenAI, APIStatusError
from openai.types.chat import ChatCompletion, ChatCompletionMessageToolCall

import os
from typing import Optional, Any

import logging
import re
import asyncio

from meshagent.openai.proxy import get_client

logger = logging.getLogger("openai_agent")


def _replace_non_matching(text: str, allowed_chars: str, replacement: str) -> str:
    """
    Replaces every character in `text` that does not match the given
    `allowed_chars` regex set with `replacement`.

    Parameters:
    -----------
    text : str
        The input string on which the replacement is to be done.
    allowed_chars : str
        A string defining the set of allowed characters (part of a character set).
        For example, "a-zA-Z0-9" will keep only letters and digits.
    replacement : str
        The string to replace non-matching characters with.

    Returns:
    --------
    str
        A new string where all characters not in `allowed_chars` are replaced.
    """
    # Build a regex that matches any character NOT in allowed_chars
    pattern = rf"[^{allowed_chars}]"
    return re.sub(pattern, replacement, text)


def safe_tool_name(name: str):
    return _replace_non_matching(name, "a-zA-Z0-9_-", "_")
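
# Illustration (hypothetical inputs): anything outside [a-zA-Z0-9_-] becomes "_",
# e.g. safe_tool_name("web.search tool") -> "web_search_tool"
#      safe_tool_name("fetch-url_2")     -> "fetch-url_2" (already valid, unchanged)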


# Collects a group of tool proxies and manages execution of openai tool calls
class CompletionsToolBundle:
    def __init__(self, toolkits: List[Toolkit]):
        self._toolkits = toolkits
        self._executors = dict[str, Toolkit]()
        self._safe_names = {}

        open_ai_tools = []

        for toolkit in toolkits:
            for v in toolkit.tools:
                k = v.name

                name = safe_tool_name(k)

                if k in self._executors:
                    raise Exception(
                        f"duplicate in bundle '{k}', tool names must be unique."
                    )

                self._executors[k] = toolkit

                self._safe_names[name] = k

                fn = {
                    "name": name,
                    "parameters": {
                        **v.input_schema,
                    },
                    "strict": True,
                }

                if v.defs is not None:
                    fn["parameters"]["$defs"] = v.defs

                schema = {
                    "type": "function",
                    "function": fn,
                }

                open_ai_tools.append(schema)

        if len(open_ai_tools) == 0:
            open_ai_tools = None

        self._open_ai_tools = open_ai_tools

    async def execute(
        self, *, context: ToolContext, tool_call: ChatCompletionMessageToolCall
    ) -> Response:
        function = tool_call.function
        name = function.name
        arguments = json.loads(function.arguments)

        if name not in self._safe_names:
            raise RoomException(f"Invalid tool name {name}, check the name of the tool")

        name = self._safe_names[name]

        if name not in self._executors:
            raise Exception(f"Unregistered tool name {name}")

        proxy = self._executors[name]
        result = await proxy.execute(context=context, name=name, arguments=arguments)
        return result

    def contains(self, name: str) -> bool:
        # membership is keyed by the sanitized tool name
        return name in self._safe_names

    def to_json(self) -> List[dict] | None:
        if self._open_ai_tools is None:
            return None
        return self._open_ai_tools.copy()
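
# Illustration of the shape to_json() produces for a tool named "web.search"
# (the concrete names and parameters here are hypothetical):
#   [{"type": "function",
#     "function": {"name": "web_search", "parameters": {...}, "strict": True}}]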


# Converts a tool response into a series of messages that can be inserted into the openai context
class OpenAICompletionsToolResponseAdapter(ToolResponseAdapter):
    def __init__(self):
        pass

    async def to_plain_text(self, *, room: RoomClient, response: Response) -> str:
        if isinstance(response, LinkResponse):
            return json.dumps(
                {
                    "name": response.name,
                    "url": response.url,
                }
            )

        elif isinstance(response, JsonResponse):
            return json.dumps(response.json)

        elif isinstance(response, TextResponse):
            return response.text

        elif isinstance(response, FileResponse):
            return f"{response.name}"

        elif isinstance(response, EmptyResponse):
            return "ok"

        # elif isinstance(response, ImageResponse):
        #     context.messages.append({
        #         "role" : "tool",
        #         "content" : "the user will upload the image",
        #         "tool_call_id" : tool_call.id,
        #     })
        #     context.messages.append({
        #         "role" : "user",
        #         "content" : [
        #             { "type" : "text", "text": "this is the image from tool call id {tool_call.id}" },
        #             { "type" : "image_url", "image_url": {"url": response.url, "detail": "auto"} }
        #         ]
        #     })

        elif isinstance(response, dict):
            return json.dumps(response)

        elif isinstance(response, str):
            return response

        elif response is None:
            return "ok"

        else:
            raise Exception(
                "unexpected return type: {type}".format(type=type(response))
            )

    async def create_messages(
        self,
        *,
        context: AgentChatContext,
        tool_call: Any,
        room: RoomClient,
        response: Response,
    ) -> list:
        message = {
            "role": "tool",
            "content": await self.to_plain_text(room=room, response=response),
            "tool_call_id": tool_call.id,
        }

        room.developer.log_nowait(
            type="llm.message",
            data={
                "context": context.id,
                "participant_id": room.local_participant.id,
                "participant_name": room.local_participant.get_attribute("name"),
                "message": message,
            },
        )

        return [message]
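
# Illustration only: a TextResponse with text "42" answering tool call "call_abc"
# becomes [{"role": "tool", "content": "42", "tool_call_id": "call_abc"}]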


class OpenAICompletionsAdapter(LLMAdapter):
    def __init__(
        self,
        model: str = os.getenv("OPENAI_MODEL"),
        parallel_tool_calls: Optional[bool] = None,
        client: Optional[AsyncOpenAI] = None,
    ):
        self._model = model
        self._parallel_tool_calls = parallel_tool_calls
        self._client = client

    def create_chat_context(self):
        system_role = "system"
        if self._model.startswith("o1"):
            system_role = "developer"
        elif self._model.startswith("o3"):
            system_role = "developer"
        elif self._model.startswith("o4"):
            system_role = "developer"

        context = AgentChatContext(system_role=system_role)

        return context
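
# Illustration: reasoning models get the "developer" role, others keep "system",
# e.g. OpenAICompletionsAdapter(model="o3-mini") -> "developer",
#      OpenAICompletionsAdapter(model="gpt-4o")  -> "system"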

    # Takes the current chat context, executes a completion request, and processes the response.
    # If tool calls are requested, invokes the tools, processes the results, and appends the tool call results to the context
    async def next(
        self,
        *,
        model: Optional[str] = None,
        context: AgentChatContext,
        room: RoomClient,
        toolkits: List[Toolkit],
        tool_adapter: Optional[ToolResponseAdapter] = None,
        output_schema: Optional[dict] = None,
        on_behalf_of: Optional[RemoteParticipant] = None,
    ):
        if tool_adapter is None:
            tool_adapter = OpenAICompletionsToolResponseAdapter()

        try:
            openai = self._client if self._client is not None else get_client(room=room)

            tool_bundle = CompletionsToolBundle(
                toolkits=[
                    *toolkits,
                ]
            )
            open_ai_tools = tool_bundle.to_json()

            if open_ai_tools is not None:
                logger.info("OpenAI Tools: %s", json.dumps(open_ai_tools))
            else:
                logger.info("OpenAI Tools: Empty")

            response_schema = output_schema
            response_name = "response"

            while True:
                logger.info(
                    "model: %s, context: %s, output_schema: %s",
                    self._model,
                    context.messages,
                    output_schema,
                )
                ptc = self._parallel_tool_calls
                extra = {}
                if ptc is not None and not self._model.startswith("o"):
                    extra["parallel_tool_calls"] = ptc

                if output_schema is not None:
                    extra["response_format"] = {
                        "type": "json_schema",
                        "json_schema": {
                            "name": response_name,
                            "schema": response_schema,
                            "strict": True,
                        },
                    }

                response: ChatCompletion = await openai.chat.completions.create(
                    n=1,
                    model=self._model,
                    messages=context.messages,
                    tools=open_ai_tools,
                    **extra,
                )
                message = response.choices[0].message
                room.developer.log_nowait(
                    type="llm.message",
                    data={
                        "context": context.id,
                        "participant_id": room.local_participant.id,
                        "participant_name": room.local_participant.get_attribute(
                            "name"
                        ),
                        "message": message.to_dict(),
                    },
                )
                context.messages.append(message)

                if message.tool_calls is not None:
                    tasks = []

                    async def do_tool_call(tool_call: ChatCompletionMessageToolCall):
                        try:
                            tool_context = ToolContext(
                                room=room,
                                caller=room.local_participant,
                                caller_context={"chat": context.to_json},
                            )
                            tool_response = await tool_bundle.execute(
                                context=tool_context, tool_call=tool_call
                            )
                            logger.info(f"tool response {tool_response}")
                            return await tool_adapter.create_messages(
                                context=context,
                                tool_call=tool_call,
                                room=room,
                                response=tool_response,
                            )

                        except Exception as e:
                            logger.error(
                                f"unable to complete tool call {tool_call}", exc_info=e
                            )
                            room.developer.log_nowait(
                                type="llm.error",
                                data={
                                    "participant_id": room.local_participant.id,
                                    "participant_name": room.local_participant.get_attribute(
                                        "name"
                                    ),
                                    "error": f"{e}",
                                },
                            )

                            return [
                                {
                                    "role": "tool",
                                    "content": json.dumps(
                                        {"error": f"unable to complete tool call: {e}"}
                                    ),
                                    "tool_call_id": tool_call.id,
                                }
                            ]

                    for tool_call in message.tool_calls:
                        tasks.append(asyncio.create_task(do_tool_call(tool_call)))

                    results = await asyncio.gather(*tasks)

                    for result in results:
                        if result is not None:
                            room.developer.log_nowait(
                                type="llm.message",
                                data={
                                    "context": context.id,
                                    "participant_id": room.local_participant.id,
                                    "participant_name": room.local_participant.get_attribute(
                                        "name"
                                    ),
                                    "message": result,
                                },
                            )
                            # each tool call yields a list of messages
                            context.messages.extend(result)

                elif message.content is not None:
                    content = message.content

                    logger.info("RESPONSE FROM OPENAI %s", content)
                    if response_schema is None:
                        return content

                    # First try to parse the result
                    try:
                        full_response = json.loads(content)
                    # sometimes OpenAI packs two JSON chunks separated by a newline; check if that's why we couldn't parse
                    except json.decoder.JSONDecodeError:
                        for part in content.splitlines():
                            if len(part.strip()) > 0:
                                full_response = json.loads(part)
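                        # e.g. content of '{"answer": 1}\n{"answer": 1}' fails json.loads
                        # as a whole; per-line parsing recovers the object (the last
                        # non-empty line wins)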

                    try:
                        self.validate(
                            response=full_response,
                            output_schema=response_schema,
                        )
                    except Exception as e:
                        logger.error(
                            "received invalid response, retrying",
                            exc_info=e,
                        )
                        error = {
                            "role": "user",
                            "content": "encountered a validation error with the output: {error}".format(
                                error=e
                            ),
                        }
                        room.developer.log_nowait(
                            type="llm.message",
                            data={
                                "context": context.id,
                                "participant_id": room.local_participant.id,
                                "participant_name": room.local_participant.get_attribute(
                                    "name"
                                ),
                                "message": error,
                            },
                        )
                        context.messages.append(error)
                        continue

                    return full_response
                else:
                    raise RoomException(
                        "Unexpected response from OpenAI {response}".format(
                            response=message
                        )
                    )
        except APIStatusError as e:
            raise RoomException(f"Error from OpenAI: {e}")