meshagent-openai 0.0.34__py3-none-any.whl → 0.0.36__py3-none-any.whl

This diff compares the contents of two publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only.

Potentially problematic release.


This version of meshagent-openai might be problematic.

@@ -5,7 +5,11 @@ from openai import AsyncOpenAI
 def get_client(*, room: RoomClient) -> AsyncOpenAI:
 
     token : str = room.protocol.token
-    url : str = room.room_url
+
+    # when running inside the room pod, the room.room_url currently points to the external url
+    # so we need to use url off the protocol (if available).
+    # TODO: room_url should be set properly, but may need a claim in the token to be set during call to say it is local
+    url : str = getattr(room.protocol, "url", room.room_url)
 
     room_proxy_url = f"{url}/v1"
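The new `getattr` call prefers a local `url` exposed on the protocol object and falls back to the external `room_url` when that attribute is absent, as the in-line comment explains. A minimal sketch of the pattern, using hypothetical stand-ins rather than the real meshagent types:

```python
# Sketch of the fallback above; Proto and Room are hypothetical stand-ins
# for room.protocol and room, not the real meshagent classes.
class Proto:
    url = "http://room-pod.internal:8080"  # local URL, present on newer protocols

class Room:
    protocol = Proto()
    room_url = "https://rooms.example.com"  # external URL

room = Room()
url = getattr(room.protocol, "url", room.room_url)  # prefers the local URL
print(f"{url}/v1")  # http://room-pod.internal:8080/v1
```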
 
@@ -19,6 +19,7 @@ import re
 import asyncio
 
 from pydantic import BaseModel
+import copy
 
 logger = logging.getLogger("openai_agent")
 
@@ -26,12 +27,18 @@ from opentelemetry import trace
 
 tracer = trace.get_tracer("openai.llm.responses")
 
+
+def safe_json_dump(data: dict):
+
+    return json.dumps(copy.deepcopy(data))
+
 def safe_model_dump(model: BaseModel):
     try:
-        return model.model_dump_json()
+        return safe_json_dump(model.model_dump(mode='json'))
     except:
         return {"error":"unable to dump json for model"}
 
+
 def _replace_non_matching(text: str, allowed_chars: str, replacement: str) -> str:
     """
     Replaces every character in `text` that does not match the given
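The new `safe_json_dump` serializes a deep copy rather than the live object, presumably so a payload mutated elsewhere (for example, by another thread while a streamed response is still being assembled) cannot corrupt or abort the dump. A small illustration of the behavior:

```python
import copy
import json

def safe_json_dump(data: dict) -> str:
    # Serialize a snapshot, not the live object, so later mutation of
    # `data` cannot affect (or race with) the serialized form.
    return json.dumps(copy.deepcopy(data))

event = {"type": "response.delta", "payload": {"text": "partial"}}
snapshot = safe_json_dump(event)
event["payload"]["text"] += " ...more"  # does not change the snapshot
print(snapshot)  # {"type": "response.delta", "payload": {"text": "partial"}}
```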
@@ -216,39 +223,43 @@ class OpenAIResponsesToolResponseAdapter(ToolResponseAdapter):
 
     async def create_messages(self, *, context: AgentChatContext, tool_call: ResponseFunctionToolCall, room: RoomClient, response: Response) -> list:
 
-        if isinstance(response, RawOutputs):
+        with tracer.start_as_current_span("llm.tool_adapter.create_messages") as span:
+
 
-            for output in response.outputs:
-
-                room.developer.log_nowait(type="llm.message", data={ "context" : context.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "message" : output })
-
-            return response.outputs
-        else:
-            output = await self.to_plain_text(room=room, response=response)
-
-            message = {
-                "output" : output,
-                "call_id" : tool_call.call_id,
-                "type" : "function_call_output"
-            }
+            if isinstance(response, RawOutputs):
+                span.set_attribute("kind", "raw")
+                for output in response.outputs:
+
+                    room.developer.log_nowait(type="llm.message", data={ "context" : context.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "message" : output })
+
+                return response.outputs
+            else:
 
-            room.developer.log_nowait(type="llm.message", data={ "context" : context.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "message" : message })
+                span.set_attribute("kind", "text")
+                output = await self.to_plain_text(room=room, response=response)
+                span.set_attribute("output", output)
+
+                message = {
+                    "output" : output,
+                    "call_id" : tool_call.call_id,
+                    "type" : "function_call_output"
+                }
+
+                room.developer.log_nowait(type="llm.message", data={ "context" : context.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "message" : message })
 
-            return [ message ]
+                return [ message ]
 
 class OpenAIResponsesAdapter(LLMAdapter[ResponsesToolBundle]):
     def __init__(self,
                  model: str = os.getenv("OPENAI_MODEL","gpt-4.1"),
                  parallel_tool_calls : Optional[bool] = None,
                  client: Optional[AsyncOpenAI] = None,
-                 retries : int = 0,
                  response_options : Optional[dict] = None,
                  provider: str = "openai"
                  ):
         self._model = model
         self._parallel_tool_calls = parallel_tool_calls
         self._client = client
-        self._retries = retries
         self._response_options = response_options
         self._provider = provider
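The adapter now wraps message creation in an OpenTelemetry span tagged with a `kind` attribute (`raw` vs `text`) and, for text responses, the plain-text output. A condensed sketch of the tracing pattern; the attribute keys match the diff, while the `convert` helper is illustrative:

```python
from opentelemetry import trace

tracer = trace.get_tracer("openai.llm.responses")

def convert(output_text: str, call_id: str) -> list:
    # Illustrative only: record what kind of response was converted and
    # its plain-text output on the current span, as the diff above does.
    with tracer.start_as_current_span("llm.tool_adapter.create_messages") as span:
        span.set_attribute("kind", "text")
        span.set_attribute("output", output_text)
        return [{"output": output_text, "call_id": call_id, "type": "function_call_output"}]
```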
 
@@ -273,13 +284,10 @@ class OpenAIResponsesAdapter(LLMAdapter[ResponsesToolBundle]):
     async def check_for_termination(self, *, context: AgentChatContext, room: RoomClient) -> bool:
         if len(context.previous_messages) > 0:
             last_message = context.previous_messages[-1]
-            logger.info(f"last_message {last_message}")
-
+
         for message in context.messages:
 
             if message.get("type", "message") != "message":
-                logger.info(f"found {message.get("type", "message")}")
-
                 return False
 
         return True
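Aside from the removed logging, the termination rule is unchanged: a turn ends only when every pending item is a plain chat message, so any queued non-"message" item (such as a `function_call_output`) forces another round trip to the model. A standalone sketch of the same rule, with illustrative message shapes:

```python
# Standalone sketch of the termination rule above.
def should_terminate(messages: list[dict]) -> bool:
    for message in messages:
        if message.get("type", "message") != "message":
            return False  # e.g. a pending function_call_output
    return True

assert should_terminate([{"type": "message", "content": "done"}])
assert not should_terminate([{"type": "function_call_output", "call_id": "c1"}])
```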
@@ -328,14 +336,9 @@ class OpenAIResponsesAdapter(LLMAdapter[ResponsesToolBundle]):
             ])
             open_ai_tools = tool_bundle.to_json()
 
-            if open_ai_tools != None:
-                logger.info("OpenAI Tools: %s", json.dumps(open_ai_tools))
-            else:
-                logger.info("OpenAI Tools: Empty")
+            if open_ai_tools == None:
                 open_ai_tools = NOT_GIVEN
 
-
-            logger.info("model: %s, context: %s, output_schema: %s", self._model, context.messages, output_schema)
             ptc = self._parallel_tool_calls
             extra = {}
             if ptc != None and self._model.startswith("o") == False:
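`NOT_GIVEN` is the openai SDK's sentinel for omitting a parameter from the request entirely, which is different from sending an explicit `null`. A hedged sketch of the substitution:

```python
from openai import AsyncOpenAI, NOT_GIVEN

# Sketch only: pass NOT_GIVEN so the `tools` field is omitted from the
# request body rather than serialized as null.
async def create_response(client: AsyncOpenAI, prompt: str, tools: list | None):
    return await client.responses.create(
        model="gpt-4.1",
        input=prompt,
        tools=tools if tools is not None else NOT_GIVEN,
    )
```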
@@ -365,108 +368,91 @@ class OpenAIResponsesAdapter(LLMAdapter[ResponsesToolBundle]):
 
             stream = event_handler != None
 
-            for i in range(self._retries + 1):
-
-                if range == self._retries:
-                    raise RoomException("exceeded maximum attempts calling openai")
-                try:
-                    with tracer.start_as_current_span("llm.invoke") as span:
-                        response_options = self._response_options
-                        if response_options == None:
-                            response_options = {}
-                        response : Response = await openai.responses.create(
-                            stream=stream,
-                            model = self._model,
-                            input = context.messages,
-                            tools = open_ai_tools,
-                            text = text,
-                            previous_response_id=previous_response_id,
-
-                            **response_options
-                        )
-                    break
-                except APIStatusError as e:
-                    logger.error(f"error calling openai attempt: {i+1} ({e.response.request.url})", exc_info=e)
-                    raise
-                except Exception as e:
-                    logger.error(f"error calling openai attempt: {i+1}", exc_info=e)
-                    if i == self._retries:
-                        raise
-
-
-            async def handle_message(message: BaseModel):
+
+            with tracer.start_as_current_span("llm.invoke") as span:
+                response_options = self._response_options
+                if response_options == None:
+                    response_options = {}
+                response : Response = await openai.responses.create(
+                    stream=stream,
+                    model = self._model,
+                    input = context.messages,
+                    tools = open_ai_tools,
+                    text = text,
+                    previous_response_id=previous_response_id,
+
+                    **response_options
+                )
 
+            async def handle_message(message: BaseModel):
 
+                with tracer.start_as_current_span("llm.handle_response") as span:
 
-                with tracer.start_as_current_span("llm.handle_response") as span:
+                    span.set_attributes({
+                        "type" : message.type,
+                        "message" : safe_model_dump(message)
+                    })
+
+                    room.developer.log_nowait(type=f"llm.message", data={
+                        "context" : context.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "message" : message.to_dict()
+                    })
 
-                    span.set_attributes({
-                        "type" : message.type,
-                        "message" : safe_model_dump(message)
-                    })
-
-                    room.developer.log_nowait(type=f"llm.message", data={
-                        "context" : context.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "message" : message.to_dict()
-                    })
+                    if message.type == "function_call":
+
+                        tasks = []
 
-                    if message.type == "function_call":
-
-                        tasks = []
+                        async def do_tool_call(tool_call: ResponseFunctionToolCall):
+
+                            try:
+                                with tracer.start_as_current_span("llm.handle_tool_call") as span:
+
+                                    span.set_attributes({
+                                        "id": tool_call.id,
+                                        "name": tool_call.name,
+                                        "call_id": tool_call.call_id,
+                                        "arguments": json.dumps(tool_call.arguments)
+                                    })
 
-                        async def do_tool_call(tool_call: ResponseFunctionToolCall):
-
-                            try:
-                                with tracer.start_as_current_span("llm.handle_tool_call") as span:
+                                    tool_context = ToolContext(
+                                        room=room,
+                                        caller=room.local_participant,
+                                        caller_context={ "chat" : context.to_json() }
+                                    )
+                                    tool_response = await tool_bundle.execute(context=tool_context, tool_call=tool_call)
+                                    if tool_response.caller_context != None:
+                                        if tool_response.caller_context.get("chat", None) != None:
+                                            tool_chat_context = AgentChatContext.from_json(tool_response.caller_context["chat"])
+                                            if tool_chat_context.previous_response_id != None:
+                                                context.track_response(tool_chat_context.previous_response_id)
+
+                                    logger.info(f"tool response {tool_response}")
+                                    return await tool_adapter.create_messages(context=context, tool_call=tool_call, room=room, response=tool_response)
 
-                                    span.set_attributes({
-                                        "id": tool_call.id,
-                                        "name": tool_call.name,
-                                        "call_id": tool_call.call_id,
-                                        "arguments": json.dumps(tool_call.arguments)
-                                    })
-
-                                    tool_context = ToolContext(
-                                        room=room,
-                                        caller=room.local_participant,
-                                        caller_context={ "chat" : context.to_json() }
-                                    )
-                                    tool_response = await tool_bundle.execute(context=tool_context, tool_call=tool_call)
-                                    if tool_response.caller_context != None:
-                                        if tool_response.caller_context.get("chat", None) != None:
-                                            tool_chat_context = AgentChatContext.from_json(tool_response.caller_context["chat"])
-                                            if tool_chat_context.previous_response_id != None:
-                                                context.track_response(tool_chat_context.previous_response_id)
-
-                                    span.set_attribute("response", await tool_adapter.to_plain_text(room=room, response=tool_response))
-
-                                    logger.info(f"tool response {tool_response}")
-                                    return await tool_adapter.create_messages(context=context, tool_call=tool_call, room=room, response=tool_response)
-
-                            except Exception as e:
-                                logger.error(f"unable to complete tool call {tool_call}", exc_info=e)
-                                room.developer.log_nowait(type="llm.error", data={ "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "error" : f"{e}" })
-
-                                return [{
-                                    "output" : json.dumps({"error":f"unable to complete tool call: {e}"}),
-                                    "call_id" : tool_call.call_id,
-                                    "type" : "function_call_output"
-                                }]
+                            except Exception as e:
+                                logger.error(f"unable to complete tool call {tool_call}", exc_info=e)
+                                room.developer.log_nowait(type="llm.error", data={ "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "error" : f"{e}" })
+
+                                return [{
+                                    "output" : json.dumps({"error":f"unable to complete tool call: {e}"}),
+                                    "call_id" : tool_call.call_id,
+                                    "type" : "function_call_output"
+                                }]
 
 
-                        tasks.append(asyncio.create_task(do_tool_call(message)))
+                        tasks.append(asyncio.create_task(do_tool_call(message)))
 
-                        results = await asyncio.gather(*tasks)
+                        results = await asyncio.gather(*tasks)
 
-                        all_results = []
-                        for result in results:
-                            room.developer.log_nowait(type="llm.message", data={ "context" : context.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "message" : result })
-                            all_results.extend(result)
+                        all_results = []
+                        for result in results:
+                            room.developer.log_nowait(type="llm.message", data={ "context" : context.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "message" : result })
+                            all_results.extend(result)
 
-                        return all_results, False
+                        return all_results, False
 
-                    elif message.type == "message":
-                        with tracer.start_as_current_span("llm.handle_message") as span:
-
+                    elif message.type == "message":
+
+
                         contents = message.content
                         if response_schema == None:
                             return [], False
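Each function call is scheduled as its own asyncio task and the results are gathered, so multiple tool calls issued in one model turn can execute concurrently. A self-contained sketch of that fan-out/fan-in, where the tool body is a stand-in for `tool_bundle.execute`:

```python
import asyncio

async def run_tool(call_id: str) -> list[dict]:
    await asyncio.sleep(0.01)  # stand-in for tool_bundle.execute(...)
    return [{"type": "function_call_output", "call_id": call_id, "output": "ok"}]

async def main():
    # One task per tool call, then gather: one list of messages per call.
    tasks = [asyncio.create_task(run_tool(cid)) for cid in ("call_1", "call_2")]
    results = await asyncio.gather(*tasks)
    all_results = [m for result in results for m in result]  # flattened
    print(all_results)

asyncio.run(main())
```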
@@ -492,159 +478,169 @@ class OpenAIResponsesAdapter(LLMAdapter[ResponsesToolBundle]):
                                continue
 
                        return [ full_response ], True
-                    #elif message.type == "computer_call" and tool_bundle.get_tool("computer_call"):
-                    #    with tracer.start_as_current_span("llm.handle_computer_call") as span:
-                    #
-                    #        computer_call :ResponseComputerToolCall = message
-                    #        span.set_attributes({
-                    #            "id": computer_call.id,
-                    #            "action": computer_call.action,
-                    #            "call_id": computer_call.call_id,
-                    #            "type": json.dumps(computer_call.type)
-                    #        })
-
-                    #        tool_context = ToolContext(
-                    #            room=room,
-                    #            caller=room.local_participant,
-                    #            caller_context={ "chat" : context.to_json }
-                    #        )
-                    #        outputs = (await tool_bundle.get_tool("computer_call").execute(context=tool_context, arguments=message.to_dict(mode="json"))).outputs
-
-                    #        return outputs, False
-
-
-                    else:
-                        for toolkit in toolkits:
-                            for tool in toolkit.tools:
-                                if isinstance(tool, OpenAIResponsesTool):
-                                    handlers = tool.get_open_ai_output_handlers()
-                                    if message.type in handlers:
-                                        tool_context = ToolContext(
-                                            room=room,
-                                            caller=room.local_participant,
-                                            caller_context={ "chat" : context.to_json() }
-                                        )
-                                        result = await handlers[message.type](tool_context, **message.to_dict(mode="json"))
-                                        if result != None:
-                                            return [ result ], False
-                                    else:
+                    #elif message.type == "computer_call" and tool_bundle.get_tool("computer_call"):
+                    #    with tracer.start_as_current_span("llm.handle_computer_call") as span:
+                    #
+                    #        computer_call :ResponseComputerToolCall = message
+                    #        span.set_attributes({
+                    #            "id": computer_call.id,
+                    #            "action": computer_call.action,
+                    #            "call_id": computer_call.call_id,
+                    #            "type": json.dumps(computer_call.type)
+                    #        })
+
+                    #        tool_context = ToolContext(
+                    #            room=room,
+                    #            caller=room.local_participant,
+                    #            caller_context={ "chat" : context.to_json }
+                    #        )
+                    #        outputs = (await tool_bundle.get_tool("computer_call").execute(context=tool_context, arguments=message.model_dump(mode="json"))).outputs
+
+                    #        return outputs, False
+
 
-                                        logger.warning(f"OpenAI response handler was not registered for {message.type}")
-
-
-                    return [], False
+                    else:
+                        for toolkit in toolkits:
+                            for tool in toolkit.tools:
+                                if isinstance(tool, OpenAIResponsesTool):
+                                    with tracer.start_as_current_span("llm.handle_tool_call") as span:
+
+                                        arguments = message.model_dump(mode="json")
+                                        span.set_attributes({
+                                            "type" : message.type,
+                                            "arguments" : safe_json_dump(arguments)
+                                        })
+
+                                        handlers = tool.get_open_ai_output_handlers()
+                                        if message.type in handlers:
+                                            tool_context = ToolContext(
+                                                room=room,
+                                                caller=room.local_participant,
+                                                caller_context={ "chat" : context.to_json() }
+                                            )
+                                            result = await handlers[message.type](tool_context, **arguments)
+
+                                            if result != None:
+                                                span.set_attribute("result", safe_json_dump(result))
+                                                return [ result ], False
+                                        else:
+
+                                            logger.warning(f"OpenAI response handler was not registered for {message.type}")
+
+
+                    return [], False
+
+            if stream == False:
+                room.developer.log_nowait(type="llm.message", data={ "context" : context.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "response" : response.to_dict() })
 
-            if stream == False:
-                room.developer.log_nowait(type="llm.message", data={ "context" : context.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "response" : response.to_dict() })
-
-                context.track_response(response.id)
+                context.track_response(response.id)
 
-                final_outputs = []
-
-                for message in response.output:
-                    context.previous_messages.append(message.to_dict())
-                    outputs, done = await handle_message(message=message)
-                    if done:
-                        final_outputs.extend(outputs)
-                    else:
-                        for output in outputs:
-                            context.messages.append(output)
-
-                if len(final_outputs) > 0:
-
-                    return final_outputs[0]
-
-                with tracer.start_as_current_span("llm.turn.check_for_termination") as span:
+                final_outputs = []
+
+                for message in response.output:
+                    context.previous_messages.append(message.to_dict())
+                    outputs, done = await handle_message(message=message)
+                    if done:
+                        final_outputs.extend(outputs)
+                    else:
+                        for output in outputs:
+                            context.messages.append(output)
 
-                    term = await self.check_for_termination(context=context, room=room)
-                    if term:
-                        span.set_attribute("terminate", True)
-                        text = ""
-                        for output in response.output:
-                            if output.type == "message":
-                                for content in output.content:
-                                    text += content.text
+                if len(final_outputs) > 0:
 
-                        return text
-                    else:
-                        span.set_attribute("terminate", False)
+                    return final_outputs[0]
+
+                with tracer.start_as_current_span("llm.turn.check_for_termination") as span:
+
+                    term = await self.check_for_termination(context=context, room=room)
+                    if term:
+                        span.set_attribute("terminate", True)
+                        text = ""
+                        for output in response.output:
+                            if output.type == "message":
+                                for content in output.content:
+                                    text += content.text
+
+                        return text
+                    else:
+                        span.set_attribute("terminate", False)
 
 
-            else:
-
-                final_outputs = []
-                all_outputs = []
-                async for e in response:
-                    with tracer.start_as_current_span("llm.stream.event") as span:
+            else:
+
+                final_outputs = []
+                all_outputs = []
+                async for e in response:
+                    with tracer.start_as_current_span("llm.stream.event") as span:
 
-                        event : ResponseStreamEvent = e
-                        span.set_attributes({
-                            "type" : event.type,
-                            "event" : safe_model_dump(event)
-                        })
+                        event : ResponseStreamEvent = e
+                        span.set_attributes({
+                            "type" : event.type,
+                            "event" : safe_model_dump(event)
+                        })
 
-                        event_handler(event)
+                        event_handler(event)
 
-                        if event.type == "response.completed":
+                        if event.type == "response.completed":
 
-
-                            context.track_response(event.response.id)
-
-                            context.messages.extend(all_outputs)
+
+                            context.track_response(event.response.id)
+
+                            context.messages.extend(all_outputs)
 
-                            with tracer.start_as_current_span("llm.turn.check_for_termination") as span:
-                                term = await self.check_for_termination(context=context, room=room)
-
-                                if term:
-                                    span.set_attribute("terminate", True)
-
-                                    text = ""
-                                    for output in event.response.output:
-                                        if output.type == "message":
-                                            for content in output.content:
-                                                text += content.text
+                            with tracer.start_as_current_span("llm.turn.check_for_termination") as span:
+                                term = await self.check_for_termination(context=context, room=room)
+
+                                if term:
+                                    span.set_attribute("terminate", True)
+
+                                    text = ""
+                                    for output in event.response.output:
+                                        if output.type == "message":
+                                            for content in output.content:
+                                                text += content.text
 
-                                    return text
+                                    return text
 
-                                span.set_attribute("terminate", False)
+                                span.set_attribute("terminate", False)
 
 
-                            all_outputs = []
+                            all_outputs = []
 
-                        elif event.type == "response.output_item.done":
-
-                            context.previous_messages.append(event.item.to_dict())
+                        elif event.type == "response.output_item.done":
 
-                            outputs, done = await handle_message(message=event.item)
-                            if done:
-                                final_outputs.extend(outputs)
-                            else:
-                                for output in outputs:
-                                    all_outputs.append(output)
+                            context.previous_messages.append(event.item.to_dict())
+
+                            outputs, done = await handle_message(message=event.item)
+                            if done:
+                                final_outputs.extend(outputs)
+                            else:
+                                for output in outputs:
+                                    all_outputs.append(output)
 
-                        else:
-                            for toolkit in toolkits:
-                                for tool in toolkit.tools:
+                        else:
+                            for toolkit in toolkits:
+                                for tool in toolkit.tools:
 
-                                    if isinstance(tool, OpenAIResponsesTool):
+                                    if isinstance(tool, OpenAIResponsesTool):
 
-                                        callbacks = tool.get_open_ai_stream_callbacks()
+                                        callbacks = tool.get_open_ai_stream_callbacks()
 
-                                        if event.type in callbacks:
+                                        if event.type in callbacks:
 
-                                            tool_context = ToolContext(
-                                                room=room,
-                                                caller=room.local_participant,
-                                                caller_context={ "chat" : context.to_json() }
-                                            )
+                                            tool_context = ToolContext(
+                                                room=room,
+                                                caller=room.local_participant,
+                                                caller_context={ "chat" : context.to_json() }
+                                            )
 
-                                            await callbacks[event.type](tool_context, **event.to_dict())
+                                            await callbacks[event.type](tool_context, **event.to_dict())
 
 
-                    if len(final_outputs) > 0:
+                    if len(final_outputs) > 0:
 
-                        return final_outputs[0]
-
+                        return final_outputs[0]
+
        except APIStatusError as e:
            raise RoomException(f"Error from OpenAI: {e}")
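In the streaming branch, every event is forwarded to `event_handler`, `response.output_item.done` items are handled as they arrive, and `response.completed` folds the accumulated outputs back into the chat context. A skeleton of that loop with a stubbed event source standing in for the real AsyncOpenAI stream:

```python
import asyncio

# Stub event source in place of the real response stream; the event types
# mirror the Responses API stream events used in the diff above.
async def fake_stream():
    yield {"type": "response.output_item.done", "item": {"type": "message", "text": "hi"}}
    yield {"type": "response.completed", "response_id": "resp_123"}

async def consume(event_handler):
    all_outputs = []
    async for event in fake_stream():
        event_handler(event)  # every event is forwarded, as in the diff
        if event["type"] == "response.output_item.done":
            all_outputs.append(event["item"])
        elif event["type"] == "response.completed":
            print("completed", event["response_id"], "with", all_outputs)
            all_outputs = []  # outputs are folded back into the context here

asyncio.run(consume(lambda e: None))
```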
 
@@ -1,5 +1,6 @@
 from meshagent.tools import ToolContext, Tool, Toolkit, JsonResponse, TextResponse
 from openai import AsyncOpenAI
+from pydantic import BaseModel
 from meshagent.openai.proxy import get_client
 from typing import Optional
 import io
@@ -9,7 +10,7 @@ async def _transcribe(*, client: AsyncOpenAI, data: bytes, model: str, filename:
 
     buf = io.BytesIO(data)
     buf.name = filename
-    transcript = await client.audio.transcriptions.create(
+    transcript : BaseModel = await client.audio.transcriptions.create(
         model=model,
         response_format=response_format,
         file=buf,
@@ -22,7 +23,7 @@ async def _transcribe(*, client: AsyncOpenAI, data: bytes, model: str, filename:
 
     if isinstance(transcript, str):
         return TextResponse(text=transcript)
 
-    return JsonResponse(json=transcript.to_dict(mode="json"))
+    return JsonResponse(json=transcript.model_dump(mode="json"))
 
 class OpenAIAudioFileSTT(Tool):
     def __init__(self, *, client: Optional[AsyncOpenAI] = None):
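`model_dump(mode="json")` is pydantic v2's standard serialization method: it works on any `BaseModel` and coerces field values to JSON-safe types, which is what `JsonResponse` expects. A small illustration with a hypothetical model in place of the SDK's transcription type:

```python
from pydantic import BaseModel

class Transcription(BaseModel):  # hypothetical stand-in for the SDK model
    text: str
    duration: float

t = Transcription(text="hello world", duration=1.25)
print(t.model_dump(mode="json"))  # {'text': 'hello world', 'duration': 1.25}
```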
@@ -1 +1 @@
-__version__ = "0.0.34"
+__version__ = "0.0.36"
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: meshagent-openai
-Version: 0.0.34
+Version: 0.0.36
 Summary: OpenAI Building Blocks for Meshagent
 License-Expression: Apache-2.0
 Project-URL: Documentation, https://docs.meshagent.com
@@ -9,13 +9,13 @@ Project-URL: Source, https://www.meshagent.com
 Requires-Python: >=3.12
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: pyjwt~=2.10.1
-Requires-Dist: pytest~=8.3.5
-Requires-Dist: pytest-asyncio~=0.26.0
-Requires-Dist: openai~=1.86.0
-Requires-Dist: meshagent-api~=0.0.34
-Requires-Dist: meshagent-agents~=0.0.34
-Requires-Dist: meshagent-tools~=0.0.34
+Requires-Dist: pyjwt~=2.10
+Requires-Dist: pytest~=8.3
+Requires-Dist: pytest-asyncio~=0.26
+Requires-Dist: openai~=1.84
+Requires-Dist: meshagent-api~=0.0.36
+Requires-Dist: meshagent-agents~=0.0.36
+Requires-Dist: meshagent-tools~=0.0.36
 Dynamic: license-file
 
 ### Meshagent OpenAI
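Loosening `~=2.10.1` to `~=2.10` widens the compatible-release range from patch releases of 2.10 to any 2.x at or above 2.10. The difference can be checked with the `packaging` library:

```python
from packaging.specifiers import SpecifierSet

# "~=2.10" accepts any 2.x >= 2.10; "~=2.10.1" only accepts 2.10.x >= 2.10.1.
print("2.11.0" in SpecifierSet("~=2.10"))    # True
print("2.11.0" in SpecifierSet("~=2.10.1"))  # False
print("2.10.2" in SpecifierSet("~=2.10.1"))  # True
```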
@@ -0,0 +1,15 @@
+meshagent/openai/__init__.py,sha256=4JRby-ltGfJzrNYhJkMNIpVc2ml2zL_JkkFC0T1_8Vk,174
+meshagent/openai/version.py,sha256=8XOR9xXboOEdDoZvWO2gEX-ufe6IVa50eWNDhT4ctHI,22
+meshagent/openai/proxy/__init__.py,sha256=SqoueAmMXHbDKd8O4EeqGkI0gEiC3xLTLlpESGxySPU,30
+meshagent/openai/proxy/proxy.py,sha256=JG3I6doIJXPkeZUWb6h93xEv5i1GO4I1cSuWDfLlbf8,883
+meshagent/openai/tools/__init__.py,sha256=SRJpWc_L9jv1c8aBLULflDg8co1kaw2Ffnr6hDkYEwg,240
+meshagent/openai/tools/completions_adapter.py,sha256=M8PpyaLu02QwrYkLB3c1h72J3wlmrK3UdfNKx6yUDJk,14483
+meshagent/openai/tools/responses_adapter.py,sha256=ZsfWcmiasdpbiz27eaGpHN-_df2dD6Xpf4iCGbGNcGg,53608
+meshagent/openai/tools/schema.py,sha256=7WvWFWK65G123G6ADxR27wA8vVpB_Twc3ZXlrYulMZg,9572
+meshagent/openai/tools/stt.py,sha256=6Ig8h-0wO0OCG6WKikp15HGqIVBKAWrP8HLzQimuvNk,3611
+meshagent/openai/tools/stt_test.py,sha256=FCTWZ7bI0vUnTRjRivO_5QEZqHaTE0ehNp1QQkx8iJ0,2651
+meshagent_openai-0.0.36.dist-info/licenses/LICENSE,sha256=eTt0SPW-sVNdkZe9PS_S8WfCIyLjRXRl7sUBWdlteFg,10254
+meshagent_openai-0.0.36.dist-info/METADATA,sha256=o34A3RrATuAbp3PasT_aiI4qN-Ytci5MPcvOoNuDCWQ,652
+meshagent_openai-0.0.36.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+meshagent_openai-0.0.36.dist-info/top_level.txt,sha256=GlcXnHtRP6m7zlG3Df04M35OsHtNXy_DY09oFwWrH74,10
+meshagent_openai-0.0.36.dist-info/RECORD,,
@@ -1,15 +0,0 @@
-meshagent/openai/__init__.py,sha256=4JRby-ltGfJzrNYhJkMNIpVc2ml2zL_JkkFC0T1_8Vk,174
-meshagent/openai/version.py,sha256=ws3BQQ_HUvFzVrPwfmrsx1ZpX_ij4MUX-YY3k_0qRB8,22
-meshagent/openai/proxy/__init__.py,sha256=SqoueAmMXHbDKd8O4EeqGkI0gEiC3xLTLlpESGxySPU,30
-meshagent/openai/proxy/proxy.py,sha256=Hc0IPkVmOyxEdiZqk3v-1muVFmFEwdWLVWRAj4cQJpA,571
-meshagent/openai/tools/__init__.py,sha256=SRJpWc_L9jv1c8aBLULflDg8co1kaw2Ffnr6hDkYEwg,240
-meshagent/openai/tools/completions_adapter.py,sha256=M8PpyaLu02QwrYkLB3c1h72J3wlmrK3UdfNKx6yUDJk,14483
-meshagent/openai/tools/responses_adapter.py,sha256=tnAAZmXIUBaTvdOOyYF1PpI5M_8vjmizXt_TZ50PKyk,53297
-meshagent/openai/tools/schema.py,sha256=7WvWFWK65G123G6ADxR27wA8vVpB_Twc3ZXlrYulMZg,9572
-meshagent/openai/tools/stt.py,sha256=08QcfIcdUZgGRhgK-mwrkabKApE7uwhe4fG5invYSh0,3565
-meshagent/openai/tools/stt_test.py,sha256=FCTWZ7bI0vUnTRjRivO_5QEZqHaTE0ehNp1QQkx8iJ0,2651
-meshagent_openai-0.0.34.dist-info/licenses/LICENSE,sha256=eTt0SPW-sVNdkZe9PS_S8WfCIyLjRXRl7sUBWdlteFg,10254
-meshagent_openai-0.0.34.dist-info/METADATA,sha256=MUpBAEKDgu1vU6kzlqp3wDM9F7NLhJLrXf5cD8kJUi8,660
-meshagent_openai-0.0.34.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-meshagent_openai-0.0.34.dist-info/top_level.txt,sha256=GlcXnHtRP6m7zlG3Df04M35OsHtNXy_DY09oFwWrH74,10
-meshagent_openai-0.0.34.dist-info/RECORD,,