meshagent-openai 0.0.34__py3-none-any.whl → 0.0.36__py3-none-any.whl
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Potentially problematic release: this version of meshagent-openai might be problematic.
- meshagent/openai/proxy/proxy.py +5 -1
- meshagent/openai/tools/responses_adapter.py +240 -244
- meshagent/openai/tools/stt.py +3 -2
- meshagent/openai/version.py +1 -1
- {meshagent_openai-0.0.34.dist-info → meshagent_openai-0.0.36.dist-info}/METADATA +8 -8
- meshagent_openai-0.0.36.dist-info/RECORD +15 -0
- meshagent_openai-0.0.34.dist-info/RECORD +0 -15
- {meshagent_openai-0.0.34.dist-info → meshagent_openai-0.0.36.dist-info}/WHEEL +0 -0
- {meshagent_openai-0.0.34.dist-info → meshagent_openai-0.0.36.dist-info}/licenses/LICENSE +0 -0
- {meshagent_openai-0.0.34.dist-info → meshagent_openai-0.0.36.dist-info}/top_level.txt +0 -0
meshagent/openai/proxy/proxy.py
CHANGED

```diff
@@ -5,7 +5,11 @@ from openai import AsyncOpenAI
 def get_client(*, room: RoomClient) -> AsyncOpenAI:
 
     token : str = room.protocol.token
-
+
+    # when running inside the room pod, the room.room_url currently points to the external url
+    # so we need to use the url off the protocol (if available).
+    # TODO: room_url should be set properly, but may need a claim in the token to be set during call to say it is local
+    url : str = getattr(room.protocol, "url", room.room_url)
 
     room_proxy_url = f"{url}/v1"
 
```
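The substantive change is the base-URL fallback: `get_client` now prefers a URL carried on the protocol (present when running inside the room pod) and only falls back to the externally visible `room.room_url`. A minimal sketch of the pattern, using simplified stand-ins rather than the real meshagent types:

```python
# Simplified stand-ins for RoomClient/protocol; only the attributes the
# fallback touches are modeled here.
class _Protocol:
    def __init__(self, token: str, url: str | None = None):
        self.token = token
        if url is not None:
            self.url = url  # present only when a room-local url is known

class _Room:
    def __init__(self, protocol: _Protocol, room_url: str):
        self.protocol = protocol
        self.room_url = room_url

def resolve_base_url(room: _Room) -> str:
    # getattr falls back to the external room_url when the protocol has
    # no local url attribute, matching the diff's one-liner
    return getattr(room.protocol, "url", room.room_url)

assert resolve_base_url(_Room(_Protocol("tok"), "https://ext.example")) == "https://ext.example"
assert resolve_base_url(_Room(_Protocol("tok", "http://local:8080"), "https://ext.example")) == "http://local:8080"
```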
meshagent/openai/tools/responses_adapter.py
CHANGED

```diff
@@ -19,6 +19,7 @@ import re
 import asyncio
 
 from pydantic import BaseModel
+import copy
 
 logger = logging.getLogger("openai_agent")
 
```
```diff
@@ -26,12 +27,18 @@ from opentelemetry import trace
 
 tracer = trace.get_tracer("openai.llm.responses")
 
+
+def safe_json_dump(data: dict):
+
+    return json.dumps(copy.deepcopy(data))
+
 def safe_model_dump(model: BaseModel):
     try:
-        return model.
+        return safe_json_dump(model.model_dump(mode='json'))
     except:
         return {"error":"unable to dump json for model"}
 
+
 def _replace_non_matching(text: str, allowed_chars: str, replacement: str) -> str:
     """
     Replaces every character in `text` that does not match the given
```
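The new `safe_json_dump` helper deep-copies its input before serializing, and `safe_model_dump` now routes pydantic models through it. A plausible reading (the diff does not state the motivation) is that the copy snapshots dicts another task may still be mutating. A self-contained sketch mirroring the helpers; note that the success path now returns a JSON string while the failure path still returns a dict, exactly as in the diff:

```python
import copy
import json
from pydantic import BaseModel

def safe_json_dump(data: dict) -> str:
    # deepcopy snapshots the input first, so concurrent mutation of the
    # original dict cannot corrupt the dump (assumed rationale)
    return json.dumps(copy.deepcopy(data))

def safe_model_dump(model: BaseModel):
    try:
        return safe_json_dump(model.model_dump(mode="json"))
    except Exception:
        return {"error": "unable to dump json for model"}

class Event(BaseModel):
    type: str
    payload: dict

print(safe_model_dump(Event(type="response.completed", payload={"ok": True})))
```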
```diff
@@ -216,39 +223,43 @@ class OpenAIResponsesToolResponseAdapter(ToolResponseAdapter):
 
     async def create_messages(self, *, context: AgentChatContext, tool_call: ResponseFunctionToolCall, room: RoomClient, response: Response) -> list:
 
-
+        with tracer.start_as_current_span("llm.tool_adapter.create_messages") as span:
+
 
-
-
-
-
-
-
-
-        message = {
-            "output" : output,
-            "call_id" : tool_call.call_id,
-            "type" : "function_call_output"
-        }
+            if isinstance(response, RawOutputs):
+                span.set_attribute("kind", "raw")
+                for output in response.outputs:
+
+                    room.developer.log_nowait(type="llm.message", data={ "context" : context.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "message" : output })
+
+                return response.outputs
+            else:
 
-
+                span.set_attribute("kind", "text")
+                output = await self.to_plain_text(room=room, response=response)
+                span.set_attribute("output", output)
+
+                message = {
+                    "output" : output,
+                    "call_id" : tool_call.call_id,
+                    "type" : "function_call_output"
+                }
+
+                room.developer.log_nowait(type="llm.message", data={ "context" : context.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "message" : message })
 
-
+                return [ message ]
 
 class OpenAIResponsesAdapter(LLMAdapter[ResponsesToolBundle]):
     def __init__(self,
                  model: str = os.getenv("OPENAI_MODEL","gpt-4.1"),
                  parallel_tool_calls : Optional[bool] = None,
                  client: Optional[AsyncOpenAI] = None,
-                 retries : int = 0,
                  response_options : Optional[dict] = None,
                  provider: str = "openai"
                  ):
         self._model = model
         self._parallel_tool_calls = parallel_tool_calls
         self._client = client
-        self._retries = retries
         self._response_options = response_options
         self._provider = provider
 
```
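`create_messages` is now traced and branches on the response type: `RawOutputs` are forwarded to the model verbatim, while anything else is flattened to plain text and wrapped as a `function_call_output`. A minimal sketch of the two output shapes, with illustrative stand-in classes:

```python
import json

class RawOutputs:  # stand-in for meshagent's raw-output response type
    def __init__(self, outputs: list[dict]):
        self.outputs = outputs

def to_messages(response, call_id: str, plain_text: str) -> list[dict]:
    if isinstance(response, RawOutputs):
        # raw outputs pass through untouched
        return response.outputs
    # everything else becomes a single text-bearing tool result
    return [{
        "output": plain_text,
        "call_id": call_id,
        "type": "function_call_output",
    }]

print(to_messages(RawOutputs([{"type": "computer_call_output"}]), "c1", ""))
print(to_messages(object(), "c1", json.dumps({"ok": True})))
```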
```diff
@@ -273,13 +284,10 @@ class OpenAIResponsesAdapter(LLMAdapter[ResponsesToolBundle]):
     async def check_for_termination(self, *, context: AgentChatContext, room: RoomClient) -> bool:
         if len(context.previous_messages) > 0:
             last_message = context.previous_messages[-1]
-
-
+
         for message in context.messages:
 
             if message.get("type", "message") != "message":
-                logger.info(f"found {message.get("type", "message")}")
-
                 return False
 
         return True
```
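The rewritten `check_for_termination` drops the per-message debug logging but keeps the rule: a turn is finished only when every item in the context is a plain `message`. The rule in isolation:

```python
def is_turn_complete(messages: list[dict]) -> bool:
    # any pending non-message item (e.g. a function_call still awaiting
    # its output) means the model must be invoked again
    for message in messages:
        if message.get("type", "message") != "message":
            return False
    return True

assert is_turn_complete([{"type": "message"}])
assert not is_turn_complete([{"type": "message"}, {"type": "function_call"}])
```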
```diff
@@ -328,14 +336,9 @@ class OpenAIResponsesAdapter(LLMAdapter[ResponsesToolBundle]):
         ])
         open_ai_tools = tool_bundle.to_json()
 
-        if open_ai_tools
-            logger.info("OpenAI Tools: %s", json.dumps(open_ai_tools))
-        else:
-            logger.info("OpenAI Tools: Empty")
+        if open_ai_tools == None:
             open_ai_tools = NOT_GIVEN
 
-
-        logger.info("model: %s, context: %s, output_schema: %s", self._model, context.messages, output_schema)
         ptc = self._parallel_tool_calls
         extra = {}
         if ptc != None and self._model.startswith("o") == False:
```
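The tool-list logging gives way to a `NOT_GIVEN` guard. `NOT_GIVEN` is the OpenAI SDK's sentinel for "omit this parameter from the request entirely", which is not the same as sending `tools=None`:

```python
from openai import NOT_GIVEN  # sentinel exported by the openai SDK

def pick_tools(tool_json):
    # None would still be serialized into the request body; NOT_GIVEN
    # makes the SDK drop the field from the payload altogether
    return NOT_GIVEN if tool_json is None else tool_json

assert pick_tools(None) is NOT_GIVEN
assert pick_tools([{"type": "function"}]) == [{"type": "function"}]
```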
```diff
@@ -365,108 +368,91 @@ class OpenAIResponsesAdapter(LLMAdapter[ResponsesToolBundle]):
 
             stream = event_handler != None
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                        previous_response_id=previous_response_id,
-
-                        **response_options
-                    )
-                    break
-                except APIStatusError as e:
-                    logger.error(f"error calling openai attempt: {i+1} ({e.response.request.url})", exc_info=e)
-                    raise
-                except Exception as e:
-                    logger.error(f"error calling openai attempt: {i+1}", exc_info=e)
-                    if i == self._retries:
-                        raise
-
-
-            async def handle_message(message: BaseModel):
+
+            with tracer.start_as_current_span("llm.invoke") as span:
+                response_options = self._response_options
+                if response_options == None:
+                    response_options = {}
+                response : Response = await openai.responses.create(
+                    stream=stream,
+                    model = self._model,
+                    input = context.messages,
+                    tools = open_ai_tools,
+                    text = text,
+                    previous_response_id=previous_response_id,
+
+                    **response_options
+                )
 
+            async def handle_message(message: BaseModel):
 
+                with tracer.start_as_current_span("llm.handle_response") as span:
 
-
+                    span.set_attributes({
+                        "type" : message.type,
+                        "message" : safe_model_dump(message)
+                    })
+
+                    room.developer.log_nowait(type=f"llm.message", data={
+                        "context" : context.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "message" : message.to_dict()
+                    })
 
-
-
-
-                })
-
-                room.developer.log_nowait(type=f"llm.message", data={
-                    "context" : context.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "message" : message.to_dict()
-                })
+                    if message.type == "function_call":
+
+                        tasks = []
 
-
-
-
+                        async def do_tool_call(tool_call: ResponseFunctionToolCall):
+
+                            try:
+                                with tracer.start_as_current_span("llm.handle_tool_call") as span:
+
+                                    span.set_attributes({
+                                        "id": tool_call.id,
+                                        "name": tool_call.name,
+                                        "call_id": tool_call.call_id,
+                                        "arguments": json.dumps(tool_call.arguments)
+                                    })
 
-
-
-
-
+                                    tool_context = ToolContext(
+                                        room=room,
+                                        caller=room.local_participant,
+                                        caller_context={ "chat" : context.to_json() }
+                                    )
+                                    tool_response = await tool_bundle.execute(context=tool_context, tool_call=tool_call)
+                                    if tool_response.caller_context != None:
+                                        if tool_response.caller_context.get("chat", None) != None:
+                                            tool_chat_context = AgentChatContext.from_json(tool_response.caller_context["chat"])
+                                            if tool_chat_context.previous_response_id != None:
+                                                context.track_response(tool_chat_context.previous_response_id)
+
+                                    logger.info(f"tool response {tool_response}")
+                                    return await tool_adapter.create_messages(context=context, tool_call=tool_call, room=room, response=tool_response)
 
-
-
-
-
-
-
-
-
-
-                                    caller=room.local_participant,
-                                    caller_context={ "chat" : context.to_json() }
-                                )
-                                tool_response = await tool_bundle.execute(context=tool_context, tool_call=tool_call)
-                                if tool_response.caller_context != None:
-                                    if tool_response.caller_context.get("chat", None) != None:
-                                        tool_chat_context = AgentChatContext.from_json(tool_response.caller_context["chat"])
-                                        if tool_chat_context.previous_response_id != None:
-                                            context.track_response(tool_chat_context.previous_response_id)
-
-                                span.set_attribute("response", await tool_adapter.to_plain_text(room=room, response=tool_response))
-
-                                logger.info(f"tool response {tool_response}")
-                                return await tool_adapter.create_messages(context=context, tool_call=tool_call, room=room, response=tool_response)
-
-                            except Exception as e:
-                                logger.error(f"unable to complete tool call {tool_call}", exc_info=e)
-                                room.developer.log_nowait(type="llm.error", data={ "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "error" : f"{e}" })
-
-                                return [{
-                                    "output" : json.dumps({"error":f"unable to complete tool call: {e}"}),
-                                    "call_id" : tool_call.call_id,
-                                    "type" : "function_call_output"
-                                }]
+                            except Exception as e:
+                                logger.error(f"unable to complete tool call {tool_call}", exc_info=e)
+                                room.developer.log_nowait(type="llm.error", data={ "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "error" : f"{e}" })
+
+                                return [{
+                                    "output" : json.dumps({"error":f"unable to complete tool call: {e}"}),
+                                    "call_id" : tool_call.call_id,
+                                    "type" : "function_call_output"
+                                }]
 
 
-
+                        tasks.append(asyncio.create_task(do_tool_call(message)))
 
-
+                        results = await asyncio.gather(*tasks)
 
-
-
-
-
+                        all_results = []
+                        for result in results:
+                            room.developer.log_nowait(type="llm.message", data={ "context" : context.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "message" : result })
+                            all_results.extend(result)
 
-
+                        return all_results, False
 
-
-
-
+                    elif message.type == "message":
+
+
                         contents = message.content
                         if response_schema == None:
                             return [], False
```
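The removed retry loop is replaced by a single traced call to `openai.responses.create`, and tool calls now fan out concurrently: each `function_call` becomes an `asyncio` task, the tasks are gathered, and a failing call degrades into an error-shaped `function_call_output` instead of aborting the turn. A runnable sketch of that fan-out, with a hypothetical `execute` callback standing in for `tool_bundle.execute`:

```python
import asyncio
import json

async def run_tool_calls(calls: list[dict], execute) -> list[dict]:
    async def one(call: dict) -> list[dict]:
        try:
            return await execute(call)
        except Exception as e:
            # mirror the diff: a failed tool call is reported back to the
            # model as an error payload rather than raised
            return [{
                "output": json.dumps({"error": f"unable to complete tool call: {e}"}),
                "call_id": call["call_id"],
                "type": "function_call_output",
            }]
    tasks = [asyncio.create_task(one(c)) for c in calls]
    results = await asyncio.gather(*tasks)
    all_results: list[dict] = []
    for result in results:
        all_results.extend(result)
    return all_results

async def demo():
    async def execute(call):
        if call["name"] == "boom":
            raise RuntimeError("nope")
        return [{"output": "42", "call_id": call["call_id"], "type": "function_call_output"}]
    print(await run_tool_calls(
        [{"name": "ok", "call_id": "a"}, {"name": "boom", "call_id": "b"}], execute))

asyncio.run(demo())
```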
```diff
@@ -492,159 +478,169 @@ class OpenAIResponsesAdapter(LLMAdapter[ResponsesToolBundle]):
                             continue
 
                         return [ full_response ], True
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                else:
-                    for toolkit in toolkits:
-                        for tool in toolkit.tools:
-                            if isinstance(tool, OpenAIResponsesTool):
-                                handlers = tool.get_open_ai_output_handlers()
-                                if message.type in handlers:
-                                    tool_context = ToolContext(
-                                        room=room,
-                                        caller=room.local_participant,
-                                        caller_context={ "chat" : context.to_json() }
-                                    )
-                                    result = await handlers[message.type](tool_context, **message.to_dict(mode="json"))
-                                    if result != None:
-                                        return [ result ], False
-                else:
+                    #elif message.type == "computer_call" and tool_bundle.get_tool("computer_call"):
+                    #    with tracer.start_as_current_span("llm.handle_computer_call") as span:
+                    #
+                    #        computer_call :ResponseComputerToolCall = message
+                    #        span.set_attributes({
+                    #            "id": computer_call.id,
+                    #            "action": computer_call.action,
+                    #            "call_id": computer_call.call_id,
+                    #            "type": json.dumps(computer_call.type)
+                    #        })
+
+                    #        tool_context = ToolContext(
+                    #            room=room,
+                    #            caller=room.local_participant,
+                    #            caller_context={ "chat" : context.to_json }
+                    #        )
+                    #        outputs = (await tool_bundle.get_tool("computer_call").execute(context=tool_context, arguments=message.model_dump(mode="json"))).outputs
+
+                    #        return outputs, False
+
 
-
-
-
-
+                    else:
+                        for toolkit in toolkits:
+                            for tool in toolkit.tools:
+                                if isinstance(tool, OpenAIResponsesTool):
+                                    with tracer.start_as_current_span("llm.handle_tool_call") as span:
+
+                                        arguments = message.model_dump(mode="json")
+                                        span.set_attributes({
+                                            "type" : message.type,
+                                            "arguments" : safe_json_dump(arguments)
+                                        })
+
+                                        handlers = tool.get_open_ai_output_handlers()
+                                        if message.type in handlers:
+                                            tool_context = ToolContext(
+                                                room=room,
+                                                caller=room.local_participant,
+                                                caller_context={ "chat" : context.to_json() }
+                                            )
+                                            result = await handlers[message.type](tool_context, **arguments)
+
+                                            if result != None:
+                                                span.set_attribute("result", safe_json_dump(result))
+                                                return [ result ], False
+                                        else:
+
+                                            logger.warning(f"OpenAI response handler was not registered for {message.type}")
+
+
+                return [], False
+
+            if stream == False:
+                room.developer.log_nowait(type="llm.message", data={ "context" : context.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "response" : response.to_dict() })
 
-
-            room.developer.log_nowait(type="llm.message", data={ "context" : context.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "response" : response.to_dict() })
-
-            context.track_response(response.id)
+                context.track_response(response.id)
 
-
-
-
-
-
-
-
-
-
-
-
-            if len(final_outputs) > 0:
-
-                return final_outputs[0]
-
-            with tracer.start_as_current_span("llm.turn.check_for_termination") as span:
+                final_outputs = []
+
+                for message in response.output:
+                    context.previous_messages.append(message.to_dict())
+                    outputs, done = await handle_message(message=message)
+                    if done:
+                        final_outputs.extend(outputs)
+                    else:
+                        for output in outputs:
+                            context.messages.append(output)
 
-
-                if term:
-                    span.set_attribute("terminate", True)
-                    text = ""
-                    for output in response.output:
-                        if output.type == "message":
-                            for content in output.content:
-                                text += content.text
+                if len(final_outputs) > 0:
 
-                                return
-
-
+                    return final_outputs[0]
+
+                with tracer.start_as_current_span("llm.turn.check_for_termination") as span:
+
+                    term = await self.check_for_termination(context=context, room=room)
+                    if term:
+                        span.set_attribute("terminate", True)
+                        text = ""
+                        for output in response.output:
+                            if output.type == "message":
+                                for content in output.content:
+                                    text += content.text
+
+                        return text
+                    else:
+                        span.set_attribute("terminate", False)
 
 
-
-
-
-
-
-
+            else:
+
+                final_outputs = []
+                all_outputs = []
+                async for e in response:
+                    with tracer.start_as_current_span("llm.stream.event") as span:
 
-
-
-
-
-
+                        event : ResponseStreamEvent = e
+                        span.set_attributes({
+                            "type" : event.type,
+                            "event" : safe_model_dump(event)
+                        })
 
-
+                        event_handler(event)
 
-
+                        if event.type == "response.completed":
 
-
-
-
-
+
+                            context.track_response(event.response.id)
+
+                            context.messages.extend(all_outputs)
 
-
-
-
-
-
-
-
-
-
-
-
+                            with tracer.start_as_current_span("llm.turn.check_for_termination") as span:
+                                term = await self.check_for_termination(context=context, room=room)
+
+                                if term:
+                                    span.set_attribute("terminate", True)
+
+                                    text = ""
+                                    for output in event.response.output:
+                                        if output.type == "message":
+                                            for content in output.content:
+                                                text += content.text
 
-
+                                    return text
 
-
+                                span.set_attribute("terminate", False)
 
 
-
+                            all_outputs = []
 
-
-
-                            context.previous_messages.append(event.item.to_dict())
+                        elif event.type == "response.output_item.done":
 
-
-
-
-
-
-
+                            context.previous_messages.append(event.item.to_dict())
+
+                            outputs, done = await handle_message(message=event.item)
+                            if done:
+                                final_outputs.extend(outputs)
+                            else:
+                                for output in outputs:
+                                    all_outputs.append(output)
 
-
-
-
+                        else:
+                            for toolkit in toolkits:
+                                for tool in toolkit.tools:
 
-
+                                    if isinstance(tool, OpenAIResponsesTool):
 
-
+                                        callbacks = tool.get_open_ai_stream_callbacks()
 
-
+                                        if event.type in callbacks:
 
-
-
-
-
-
+                                            tool_context = ToolContext(
+                                                room=room,
+                                                caller=room.local_participant,
+                                                caller_context={ "chat" : context.to_json() }
+                                            )
 
-
+                                            await callbacks[event.type](tool_context, **event.to_dict())
 
 
-
+                if len(final_outputs) > 0:
 
-
-
+                    return final_outputs[0]
+
 
         except APIStatusError as e:
             raise RoomException(f"Error from OpenAI: {e}")
 
```
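The streaming path follows the same shape as the non-streaming one: every event is forwarded to the caller's `event_handler`, finished output items are dispatched through `handle_message`, and `response.completed` folds the collected outputs into the chat context before the termination check. A hedged sketch of that control flow, using dict-shaped events instead of the SDK's typed `ResponseStreamEvent` objects and eliding the tool stream callbacks:

```python
import asyncio

async def consume_stream(events, handle_item, event_handler):
    final_outputs: list = []
    all_outputs: list = []
    async for event in events:
        event_handler(event)  # every event reaches the caller first
        if event["type"] == "response.output_item.done":
            # a finished item is handled just like a non-streamed output
            outputs, done = await handle_item(event["item"])
            (final_outputs if done else all_outputs).extend(outputs)
        elif event["type"] == "response.completed":
            # completion: in the real code this records the response id,
            # extends the context, and runs the termination check
            all_outputs = []
    return final_outputs[0] if final_outputs else None

async def demo():
    async def events():
        yield {"type": "response.output_item.done", "item": {"text": "hi"}}
        yield {"type": "response.completed"}
    async def handle_item(item):
        return [item], True
    print(await consume_stream(events(), handle_item, lambda e: None))

asyncio.run(demo())
```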
meshagent/openai/tools/stt.py
CHANGED

```diff
@@ -1,5 +1,6 @@
 from meshagent.tools import ToolContext, Tool, Toolkit, JsonResponse, TextResponse
 from openai import AsyncOpenAI
+from pydantic import BaseModel
 from meshagent.openai.proxy import get_client
 from typing import Optional
 import io
@@ -9,7 +10,7 @@ async def _transcribe(*, client: AsyncOpenAI, data: bytes, model: str, filename:
 
     buf = io.BytesIO(data)
     buf.name = filename
-    transcript = await client.audio.transcriptions.create(
+    transcript : BaseModel = await client.audio.transcriptions.create(
         model=model,
         response_format=response_format,
         file=buf,
@@ -22,7 +23,7 @@ async def _transcribe(*, client: AsyncOpenAI, data: bytes, model: str, filename:
     if isinstance(transcript, str):
         return TextResponse(text=transcript)
 
-    return JsonResponse(json=transcript.
+    return JsonResponse(json=transcript.model_dump(mode="json"))
 
 class OpenAIAudioFileSTT(Tool):
     def __init__(self, *, client: Optional[AsyncOpenAI] = None):
```
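The stt change annotates the transcript as a pydantic `BaseModel` and completes the dump with `model_dump(mode="json")`, covering the fact that `transcriptions.create` returns a bare string for some response formats and a typed model for others. A sketch of that branch, with a stand-in `Transcription` model rather than the SDK's real return type:

```python
from pydantic import BaseModel

class Transcription(BaseModel):  # stand-in for the SDK's typed result
    text: str
    language: str | None = None

def normalize(transcript: str | Transcription) -> dict:
    # response_format="text" yields a plain str; structured formats yield
    # a pydantic model, handled by model_dump(mode="json")
    if isinstance(transcript, str):
        return {"text": transcript}
    return transcript.model_dump(mode="json")

print(normalize("hello"))
print(normalize(Transcription(text="hello", language="en")))
```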
meshagent/openai/version.py
CHANGED

```diff
@@ -1 +1 @@
-__version__ = "0.0.34"
+__version__ = "0.0.36"
```
{meshagent_openai-0.0.34.dist-info → meshagent_openai-0.0.36.dist-info}/METADATA
CHANGED

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: meshagent-openai
-Version: 0.0.34
+Version: 0.0.36
 Summary: OpenAI Building Blocks for Meshagent
 License-Expression: Apache-2.0
 Project-URL: Documentation, https://docs.meshagent.com
@@ -9,13 +9,13 @@ Project-URL: Source, https://www.meshagent.com
 Requires-Python: >=3.12
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: pyjwt~=2.10
-Requires-Dist: pytest~=8.3
-Requires-Dist: pytest-asyncio~=0.26
-Requires-Dist: openai~=1.
-Requires-Dist: meshagent-api~=0.0.
-Requires-Dist: meshagent-agents~=0.0.
-Requires-Dist: meshagent-tools~=0.0.
+Requires-Dist: pyjwt~=2.10
+Requires-Dist: pytest~=8.3
+Requires-Dist: pytest-asyncio~=0.26
+Requires-Dist: openai~=1.84
+Requires-Dist: meshagent-api~=0.0.36
+Requires-Dist: meshagent-agents~=0.0.36
+Requires-Dist: meshagent-tools~=0.0.36
 Dynamic: license-file
 
 ### Meshagent OpenAI
```
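All pins use `~=`, PEP 440's compatible-release operator, so `openai~=1.84` accepts any 1.x release at or above 1.84 but excludes 2.0. A quick check with the `packaging` library:

```python
from packaging.specifiers import SpecifierSet

spec = SpecifierSet("~=1.84")  # equivalent to >=1.84, ==1.*
assert "1.84.0" in spec
assert "1.99.0" in spec
assert "2.0.0" not in spec
```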
meshagent_openai-0.0.36.dist-info/RECORD
ADDED

```diff
@@ -0,0 +1,15 @@
+meshagent/openai/__init__.py,sha256=4JRby-ltGfJzrNYhJkMNIpVc2ml2zL_JkkFC0T1_8Vk,174
+meshagent/openai/version.py,sha256=8XOR9xXboOEdDoZvWO2gEX-ufe6IVa50eWNDhT4ctHI,22
+meshagent/openai/proxy/__init__.py,sha256=SqoueAmMXHbDKd8O4EeqGkI0gEiC3xLTLlpESGxySPU,30
+meshagent/openai/proxy/proxy.py,sha256=JG3I6doIJXPkeZUWb6h93xEv5i1GO4I1cSuWDfLlbf8,883
+meshagent/openai/tools/__init__.py,sha256=SRJpWc_L9jv1c8aBLULflDg8co1kaw2Ffnr6hDkYEwg,240
+meshagent/openai/tools/completions_adapter.py,sha256=M8PpyaLu02QwrYkLB3c1h72J3wlmrK3UdfNKx6yUDJk,14483
+meshagent/openai/tools/responses_adapter.py,sha256=ZsfWcmiasdpbiz27eaGpHN-_df2dD6Xpf4iCGbGNcGg,53608
+meshagent/openai/tools/schema.py,sha256=7WvWFWK65G123G6ADxR27wA8vVpB_Twc3ZXlrYulMZg,9572
+meshagent/openai/tools/stt.py,sha256=6Ig8h-0wO0OCG6WKikp15HGqIVBKAWrP8HLzQimuvNk,3611
+meshagent/openai/tools/stt_test.py,sha256=FCTWZ7bI0vUnTRjRivO_5QEZqHaTE0ehNp1QQkx8iJ0,2651
+meshagent_openai-0.0.36.dist-info/licenses/LICENSE,sha256=eTt0SPW-sVNdkZe9PS_S8WfCIyLjRXRl7sUBWdlteFg,10254
+meshagent_openai-0.0.36.dist-info/METADATA,sha256=o34A3RrATuAbp3PasT_aiI4qN-Ytci5MPcvOoNuDCWQ,652
+meshagent_openai-0.0.36.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+meshagent_openai-0.0.36.dist-info/top_level.txt,sha256=GlcXnHtRP6m7zlG3Df04M35OsHtNXy_DY09oFwWrH74,10
+meshagent_openai-0.0.36.dist-info/RECORD,,
```
meshagent_openai-0.0.34.dist-info/RECORD
REMOVED

```diff
@@ -1,15 +0,0 @@
-meshagent/openai/__init__.py,sha256=4JRby-ltGfJzrNYhJkMNIpVc2ml2zL_JkkFC0T1_8Vk,174
-meshagent/openai/version.py,sha256=ws3BQQ_HUvFzVrPwfmrsx1ZpX_ij4MUX-YY3k_0qRB8,22
-meshagent/openai/proxy/__init__.py,sha256=SqoueAmMXHbDKd8O4EeqGkI0gEiC3xLTLlpESGxySPU,30
-meshagent/openai/proxy/proxy.py,sha256=Hc0IPkVmOyxEdiZqk3v-1muVFmFEwdWLVWRAj4cQJpA,571
-meshagent/openai/tools/__init__.py,sha256=SRJpWc_L9jv1c8aBLULflDg8co1kaw2Ffnr6hDkYEwg,240
-meshagent/openai/tools/completions_adapter.py,sha256=M8PpyaLu02QwrYkLB3c1h72J3wlmrK3UdfNKx6yUDJk,14483
-meshagent/openai/tools/responses_adapter.py,sha256=tnAAZmXIUBaTvdOOyYF1PpI5M_8vjmizXt_TZ50PKyk,53297
-meshagent/openai/tools/schema.py,sha256=7WvWFWK65G123G6ADxR27wA8vVpB_Twc3ZXlrYulMZg,9572
-meshagent/openai/tools/stt.py,sha256=08QcfIcdUZgGRhgK-mwrkabKApE7uwhe4fG5invYSh0,3565
-meshagent/openai/tools/stt_test.py,sha256=FCTWZ7bI0vUnTRjRivO_5QEZqHaTE0ehNp1QQkx8iJ0,2651
-meshagent_openai-0.0.34.dist-info/licenses/LICENSE,sha256=eTt0SPW-sVNdkZe9PS_S8WfCIyLjRXRl7sUBWdlteFg,10254
-meshagent_openai-0.0.34.dist-info/METADATA,sha256=MUpBAEKDgu1vU6kzlqp3wDM9F7NLhJLrXf5cD8kJUi8,660
-meshagent_openai-0.0.34.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-meshagent_openai-0.0.34.dist-info/top_level.txt,sha256=GlcXnHtRP6m7zlG3Df04M35OsHtNXy_DY09oFwWrH74,10
-meshagent_openai-0.0.34.dist-info/RECORD,,
```
{meshagent_openai-0.0.34.dist-info → meshagent_openai-0.0.36.dist-info}/WHEEL
File without changes

{meshagent_openai-0.0.34.dist-info → meshagent_openai-0.0.36.dist-info}/licenses/LICENSE
File without changes

{meshagent_openai-0.0.34.dist-info → meshagent_openai-0.0.36.dist-info}/top_level.txt
File without changes