meshagent-openai 0.0.37__tar.gz → 0.0.39__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of meshagent-openai might be problematic. Click here for more details.

Files changed (32) hide show
  1. {meshagent_openai-0.0.37 → meshagent_openai-0.0.39}/CHANGELOG.md +6 -0
  2. meshagent_openai-0.0.39/PKG-INFO +50 -0
  3. meshagent_openai-0.0.39/README.md +30 -0
  4. meshagent_openai-0.0.39/meshagent/openai/__init__.py +16 -0
  5. {meshagent_openai-0.0.37 → meshagent_openai-0.0.39}/meshagent/openai/proxy/__init__.py +2 -0
  6. {meshagent_openai-0.0.37 → meshagent_openai-0.0.39}/meshagent/openai/proxy/proxy.py +7 -10
  7. meshagent_openai-0.0.39/meshagent/openai/tools/__init__.py +18 -0
  8. meshagent_openai-0.0.39/meshagent/openai/tools/completions_adapter.py +452 -0
  9. meshagent_openai-0.0.39/meshagent/openai/tools/responses_adapter.py +1705 -0
  10. {meshagent_openai-0.0.37 → meshagent_openai-0.0.39}/meshagent/openai/tools/schema.py +100 -49
  11. meshagent_openai-0.0.39/meshagent/openai/tools/stt.py +118 -0
  12. {meshagent_openai-0.0.37 → meshagent_openai-0.0.39}/meshagent/openai/tools/stt_test.py +6 -4
  13. meshagent_openai-0.0.39/meshagent/openai/version.py +1 -0
  14. meshagent_openai-0.0.39/meshagent_openai.egg-info/PKG-INFO +50 -0
  15. meshagent_openai-0.0.39/meshagent_openai.egg-info/requires.txt +7 -0
  16. {meshagent_openai-0.0.37 → meshagent_openai-0.0.39}/pyproject.toml +4 -4
  17. meshagent_openai-0.0.37/PKG-INFO +0 -21
  18. meshagent_openai-0.0.37/README.md +0 -1
  19. meshagent_openai-0.0.37/meshagent/openai/__init__.py +0 -2
  20. meshagent_openai-0.0.37/meshagent/openai/tools/__init__.py +0 -3
  21. meshagent_openai-0.0.37/meshagent/openai/tools/completions_adapter.py +0 -365
  22. meshagent_openai-0.0.37/meshagent/openai/tools/responses_adapter.py +0 -1177
  23. meshagent_openai-0.0.37/meshagent/openai/tools/stt.py +0 -89
  24. meshagent_openai-0.0.37/meshagent/openai/version.py +0 -1
  25. meshagent_openai-0.0.37/meshagent_openai.egg-info/PKG-INFO +0 -21
  26. meshagent_openai-0.0.37/meshagent_openai.egg-info/requires.txt +0 -7
  27. {meshagent_openai-0.0.37 → meshagent_openai-0.0.39}/LICENSE +0 -0
  28. {meshagent_openai-0.0.37 → meshagent_openai-0.0.39}/MANIFEST.in +0 -0
  29. {meshagent_openai-0.0.37 → meshagent_openai-0.0.39}/meshagent_openai.egg-info/SOURCES.txt +0 -0
  30. {meshagent_openai-0.0.37 → meshagent_openai-0.0.39}/meshagent_openai.egg-info/dependency_links.txt +0 -0
  31. {meshagent_openai-0.0.37 → meshagent_openai-0.0.39}/meshagent_openai.egg-info/top_level.txt +0 -0
  32. {meshagent_openai-0.0.37 → meshagent_openai-0.0.39}/setup.cfg +0 -0
@@ -1,3 +1,9 @@
1
+ ## [0.0.39]
2
+ - Stability
3
+
4
+ ## [0.0.38]
5
+ - Stability
6
+
1
7
  ## [0.0.37]
2
8
  - Stability
3
9
 
@@ -0,0 +1,50 @@
1
+ Metadata-Version: 2.4
2
+ Name: meshagent-openai
3
+ Version: 0.0.39
4
+ Summary: OpenAI Building Blocks for Meshagent
5
+ License-Expression: Apache-2.0
6
+ Project-URL: Documentation, https://docs.meshagent.com
7
+ Project-URL: Website, https://www.meshagent.com
8
+ Project-URL: Source, https://www.meshagent.com
9
+ Requires-Python: >=3.12
10
+ Description-Content-Type: text/markdown
11
+ License-File: LICENSE
12
+ Requires-Dist: pyjwt~=2.10
13
+ Requires-Dist: pytest~=8.4
14
+ Requires-Dist: pytest-asyncio~=0.26
15
+ Requires-Dist: openai~=1.84
16
+ Requires-Dist: meshagent-api~=0.0.39
17
+ Requires-Dist: meshagent-agents~=0.0.39
18
+ Requires-Dist: meshagent-tools~=0.0.39
19
+ Dynamic: license-file
20
+
21
+ ## MeshAgent OpenAI
22
+
23
+ ## MeshAgent OpenAI
24
+ The ``meshagent.openai`` package provides adapters to integrate OpenAI models with MeshAgent tools and agents.
25
+
26
+ ### Completions Adapter and Responses Adapter
27
+ MeshAgent supports both the OpenAI Chat Completions API and the Responses API. The Responses adapter is recommended, since newer OpenAI models and features are built around the Responses API.
28
+
29
+ - ``OpenAICompletionsAdapter``: wraps the OpenAI Chat Completions API. It turns Toolkit objects into OpenAI-style tool definitions and processes tool calls appropriately.
30
+ - ``OpenAIResponsesAdapter``: wraps the newer OpenAI Responses API. It collects tools, handles streaming events, and provides callbacks for advanced features like image generation or web search.
31
+
32
+ ```python
33
+ from meshagent.openai import OpenAIResponsesAdapter
34
+ from openai import AsyncOpenAI
35
+
36
+ # Use an OpenAI client inside a MeshAgent LLMAdapter
37
+ adapter = OpenAIResponsesAdapter(client=AsyncOpenAI(api_key="sk-..."))
38
+ ```
39
+
40
+ ### Tool Response Adapter
41
+ The ``OpenAICompletionsToolResponseAdapter`` and ``OpenAIResponsesToolResponseAdapter`` convert a tool's structured response into plain text or JSON that can be inserted into an OpenAI chat context.
42
+
43
+ ---
44
+ ### Learn more about MeshAgent on our website or check out the docs for additional examples!
45
+
46
+ **Website**: [www.meshagent.com](https://www.meshagent.com/)
47
+
48
+ **Documentation**: [docs.meshagent.com](https://docs.meshagent.com/)
49
+
50
+ ---
@@ -0,0 +1,30 @@
1
+ ## MeshAgent OpenAI
2
+
3
+ ## MeshAgent OpenAI
4
+ The ``meshagent.openai`` package provides adapters to integrate OpenAI models with MeshAgent tools and agents.
5
+
6
+ ### Completions Adapter and Responses Adapter
7
+ MeshAgent supports both the OpenAI Chat Completions API and the Responses API. The Responses adapter is recommended, since newer OpenAI models and features are built around the Responses API.
8
+
9
+ - ``OpenAICompletionsAdapter``: wraps the OpenAI Chat Completions API. It turns Toolkit objects into OpenAI-style tool definitions and processes tool calls appropriately.
10
+ - ``OpenAIResponsesAdapter``: wraps the newer OpenAI Responses API. It collects tools, handles streaming events, and provides callbacks for advanced features like image generation or web search.
11
+
12
+ ```python
13
+ from meshagent.openai import OpenAIResponsesAdapter
14
+ from openai import AsyncOpenAI
15
+
16
+ # Use an OpenAI client inside a MeshAgent LLMAdapter
17
+ adapter = OpenAIResponsesAdapter(client=AsyncOpenAI(api_key="sk-..."))
18
+ ```
19
+
20
+ ### Tool Response Adapter
21
+ The ``OpenAICompletionsToolResponseAdapter`` and ``OpenAIResponsesToolResponseAdapter`` convert a tool's structured response into plain text or JSON that can be inserted into an OpenAI chat context.
22
+
23
+ ---
24
+ ### Learn more about MeshAgent on our website or check out the docs for additional examples!
25
+
26
+ **Website**: [www.meshagent.com](https://www.meshagent.com/)
27
+
28
+ **Documentation**: [docs.meshagent.com](https://docs.meshagent.com/)
29
+
30
+ ---
@@ -0,0 +1,16 @@
1
from .tools import (
    OpenAICompletionsAdapter,
    OpenAIResponsesAdapter,
    OpenAICompletionsToolResponseAdapter,
    OpenAIResponsesToolResponseAdapter,
)
from .version import __version__


# ``__all__`` must contain attribute *names* (strings). Listing the objects
# themselves breaks ``from meshagent.openai import *`` — the import machinery
# treats each entry as an attribute name to look up.
__all__ = [
    "__version__",
    "OpenAICompletionsAdapter",
    "OpenAIResponsesAdapter",
    "OpenAICompletionsToolResponseAdapter",
    "OpenAIResponsesToolResponseAdapter",
]
@@ -1 +1,3 @@
1
1
from .proxy import get_client

# ``__all__`` entries must be strings naming the public attributes; the
# function object itself would break ``from ... import *``.
__all__ = ["get_client"]
@@ -1,26 +1,23 @@
1
- import os
2
1
  from meshagent.api import RoomClient
3
2
  from openai import AsyncOpenAI
4
3
 
4
+
5
5
def get_client(*, room: RoomClient) -> AsyncOpenAI:
    """Build an ``AsyncOpenAI`` client that routes through the room's proxy.

    The room token is used as the API key and the request is tagged with the
    current session id via a default header.
    """
    api_token: str = room.protocol.token

    # when running inside the room pod, the room.room_url currently points to the external url
    # so we need to use url off the protocol (if available).
    # TODO: room_url should be set properly, but may need a claim in the token to be set during call to say it is local
    base_url: str = getattr(room.protocol, "url", room.room_url)

    proxy_base = f"{base_url}/v1"

    # websocket schemes map onto their http counterparts (ws->http, wss->https)
    if proxy_base.startswith(("ws:", "wss:")):
        proxy_base = proxy_base.replace("ws", "http", 1)

    return AsyncOpenAI(
        api_key=api_token,
        base_url=proxy_base,
        default_headers={"Meshagent-Session": room.session_id},
    )
@@ -0,0 +1,18 @@
1
+ from .responses_adapter import (
2
+ OpenAIResponsesAdapter,
3
+ OpenAIResponsesToolResponseAdapter,
4
+ )
5
+ from .completions_adapter import (
6
+ OpenAICompletionsAdapter,
7
+ OpenAICompletionsToolResponseAdapter,
8
+ )
9
+ from .stt import OpenAIAudioFileSTT, OpenAISTTToolkit
10
+
11
+ __all__ = [
12
+ OpenAIResponsesAdapter,
13
+ OpenAIResponsesToolResponseAdapter,
14
+ OpenAICompletionsAdapter,
15
+ OpenAICompletionsToolResponseAdapter,
16
+ OpenAIAudioFileSTT,
17
+ OpenAISTTToolkit,
18
+ ]
@@ -0,0 +1,452 @@
1
+ from meshagent.agents.agent import AgentChatContext
2
+ from meshagent.api import RoomClient, RoomException
3
+ from meshagent.tools.blob import Blob, BlobStorage
4
+ from meshagent.tools import Toolkit, ToolContext
5
+ from meshagent.api.messaging import (
6
+ Response,
7
+ LinkResponse,
8
+ FileResponse,
9
+ JsonResponse,
10
+ TextResponse,
11
+ EmptyResponse,
12
+ )
13
+ from meshagent.agents.adapter import ToolResponseAdapter, LLMAdapter
14
+ import json
15
+ from typing import List
16
+
17
+ from openai import AsyncOpenAI, APIStatusError
18
+ from openai.types.chat import ChatCompletion, ChatCompletionMessageToolCall
19
+
20
+ import os
21
+ from typing import Optional, Any
22
+
23
+ import logging
24
+ import re
25
+ import asyncio
26
+
27
+ from meshagent.openai.proxy import get_client
28
+
29
+ logger = logging.getLogger("openai_agent")
30
+
31
+
32
+ def _replace_non_matching(text: str, allowed_chars: str, replacement: str) -> str:
33
+ """
34
+ Replaces every character in `text` that does not match the given
35
+ `allowed_chars` regex set with `replacement`.
36
+
37
+ Parameters:
38
+ -----------
39
+ text : str
40
+ The input string on which the replacement is to be done.
41
+ allowed_chars : str
42
+ A string defining the set of allowed characters (part of a character set).
43
+ For example, "a-zA-Z0-9" will keep only letters and digits.
44
+ replacement : str
45
+ The string to replace non-matching characters with.
46
+
47
+ Returns:
48
+ --------
49
+ str
50
+ A new string where all characters not in `allowed_chars` are replaced.
51
+ """
52
+ # Build a regex that matches any character NOT in allowed_chars
53
+ pattern = rf"[^{allowed_chars}]"
54
+ return re.sub(pattern, replacement, text)
55
+
56
+
57
def safe_tool_name(name: str) -> str:
    """Return *name* with characters OpenAI rejects in tool names replaced by ``_``.

    OpenAI function names may only contain letters, digits, underscores and
    hyphens; everything else is substituted with an underscore.
    """
    return _replace_non_matching(name, "a-zA-Z0-9_-", "_")
59
+
60
+
61
# Collects a group of tool proxies and manages execution of openai tool calls
class CompletionsToolBundle:
    def __init__(self, toolkits: List[Toolkit]):
        """Index every tool in *toolkits* and build the OpenAI tool schemas.

        Raises
        ------
        Exception
            If two tools across the toolkits share the same name.
        """
        self._toolkits = toolkits
        # original tool name -> owning toolkit
        self._executors = dict[str, Toolkit]()
        # sanitized (OpenAI-safe) name -> original tool name
        self._safe_names = {}

        open_ai_tools = []

        for toolkit in toolkits:
            for v in toolkit.tools:
                k = v.name

                name = safe_tool_name(k)

                if k in self._executors:
                    raise Exception(
                        f"duplicate in bundle '{k}', tool names must be unique."
                    )

                self._executors[k] = toolkit

                # NOTE(review): two distinct names could sanitize to the same
                # safe name and silently overwrite each other here — confirm
                # whether a collision check is wanted.
                self._safe_names[name] = k

                fn = {
                    "name": name,
                    "parameters": {
                        **v.input_schema,
                    },
                    "strict": True,
                }

                if v.defs is not None:
                    fn["parameters"]["$defs"] = v.defs

                schema = {
                    "type": "function",
                    "function": fn,
                }

                open_ai_tools.append(schema)

        # No tools at all is represented as None (the API rejects an empty list)
        if len(open_ai_tools) == 0:
            open_ai_tools = None

        self._open_ai_tools = open_ai_tools

    async def execute(
        self, *, context: ToolContext, tool_call: ChatCompletionMessageToolCall
    ) -> Response:
        """Resolve *tool_call* to its owning toolkit and execute it.

        Raises
        ------
        RoomException
            If the model requested an unknown tool name.
        """
        try:
            function = tool_call.function
            name = function.name
            arguments = json.loads(function.arguments)

            if name not in self._safe_names:
                raise RoomException(
                    f"Invalid tool name {name}, check the name of the tool"
                )

            # map the sanitized name back to the original tool name
            name = self._safe_names[name]

            if name not in self._executors:
                raise Exception(f"Unregistered tool name {name}")

            logger.info("executing %s %s %s", tool_call.id, name, arguments)

            proxy = self._executors[name]
            result = await proxy.execute(
                context=context, name=name, arguments=arguments
            )
            logger.info("success calling %s %s %s", tool_call.id, name, result)
            return result

        except Exception as e:
            logger.error("failed calling %s %s", tool_call.id, name, exc_info=e)
            raise

    def contains(self, name: str) -> bool:
        """Return True when *name* is a known (sanitized) tool name.

        Fix: the previous implementation tested membership against
        ``self._open_ai_tools`` — a list of schema dicts (always False for a
        string, and a TypeError when it was None) — instead of the name table.
        """
        return name in self._safe_names

    def to_json(self) -> List[dict] | None:
        """Return a copy of the OpenAI tool schema list, or None when empty."""
        if self._open_ai_tools is None:
            return None
        return self._open_ai_tools.copy()
146
+
147
+
148
# Converts a tool response into a series of messages that can be inserted into the openai context
class OpenAICompletionsToolResponseAdapter(ToolResponseAdapter):
    def __init__(self, blob_storage: Optional[BlobStorage] = None):
        # Optional storage used to persist FileResponse payloads as blobs.
        self._blob_storage = blob_storage

    async def to_plain_text(self, *, room: RoomClient, response: Response) -> str:
        """Flatten a tool *response* into a plain-text chat message body.

        Raises
        ------
        RoomException
            If a FileResponse is received but no blob storage was configured.
        Exception
            If the response type is not recognized.
        """
        if isinstance(response, LinkResponse):
            return json.dumps(
                {
                    "name": response.name,
                    "url": response.url,
                }
            )

        elif isinstance(response, JsonResponse):
            return json.dumps(response.json)

        elif isinstance(response, TextResponse):
            return response.text

        elif isinstance(response, FileResponse):
            # Fix: previously this dereferenced self._blob_storage without a
            # None check, raising an opaque AttributeError when the adapter was
            # constructed with the default (no blob storage).
            if self._blob_storage is None:
                raise RoomException(
                    "received a file response but no blob storage is configured"
                )
            blob = Blob(mime_type=response.mime_type, data=response.data)
            uri = self._blob_storage.store(blob=blob)

            return f"The results have been written to a blob with the uri {uri} with the mime type {blob.mime_type}."

        elif isinstance(response, EmptyResponse):
            return "ok"

        elif isinstance(response, dict):
            return json.dumps(response)

        elif isinstance(response, str):
            return response

        elif response is None:
            return "ok"

        else:
            raise Exception(
                "unexpected return type: {type}".format(type=type(response))
            )

    async def create_messages(
        self,
        *,
        context: AgentChatContext,
        tool_call: Any,
        room: RoomClient,
        response: Response,
    ) -> list:
        """Convert *response* into tool-role chat messages and log them.

        Returns a list of message dicts attributed to *tool_call*.
        """
        message = {
            "role": "tool",
            "content": await self.to_plain_text(room=room, response=response),
            "tool_call_id": tool_call.id,
        }

        room.developer.log_nowait(
            type="llm.message",
            data={
                "context": context.id,
                "participant_id": room.local_participant.id,
                "participant_name": room.local_participant.get_attribute("name"),
                "message": message,
            },
        )

        return [message]
231
+
232
+
233
class OpenAICompletionsAdapter(LLMAdapter):
    """LLMAdapter backed by the OpenAI Chat Completions API."""

    def __init__(
        self,
        model: str = os.getenv("OPENAI_MODEL"),
        parallel_tool_calls: Optional[bool] = None,
        client: Optional[AsyncOpenAI] = None,
    ):
        # NOTE: the default model is captured from the environment at import
        # time and may be None when OPENAI_MODEL is unset.
        self._model = model
        self._parallel_tool_calls = parallel_tool_calls
        # Optional pre-configured client; when None, a room-proxied client is
        # created per call in next().
        self._client = client

    def create_chat_context(self):
        """Create a chat context whose system role matches the model family.

        Reasoning models (o1/o3/o4) use the "developer" role in place of
        "system".
        """
        system_role = "system"
        if self._model.startswith(("o1", "o3", "o4")):
            system_role = "developer"

        return AgentChatContext(system_role=system_role)

    # Takes the current chat context, executes a completion request and processes the response.
    # If tool calls are requested, invokes the tools, processes the tool call results, and appends the results to the context
    async def next(
        self,
        *,
        context: AgentChatContext,
        room: RoomClient,
        toolkits: List[Toolkit],
        tool_adapter: Optional[ToolResponseAdapter] = None,
        output_schema: Optional[dict] = None,
    ):
        """Run completions (and any requested tool calls) until a final answer.

        Returns the model's text content, or the parsed/validated JSON object
        when *output_schema* is provided.

        Raises
        ------
        RoomException
            On API errors, unparseable output, or an unexpected response shape.
        """
        if tool_adapter is None:
            tool_adapter = OpenAICompletionsToolResponseAdapter()

        try:
            # Fix: honor a client supplied at construction time; previously
            # ``client`` was stored but never used and the room proxy client
            # was always created.
            if self._client is not None:
                openai = self._client
            else:
                openai = get_client(room=room)

            tool_bundle = CompletionsToolBundle(
                toolkits=[
                    *toolkits,
                ]
            )
            open_ai_tools = tool_bundle.to_json()

            if open_ai_tools is not None:
                logger.info("OpenAI Tools: %s", json.dumps(open_ai_tools))
            else:
                logger.info("OpenAI Tools: Empty")

            response_schema = output_schema
            response_name = "response"

            while True:
                logger.info(
                    "model: %s, context: %s, output_schema: %s",
                    self._model,
                    context.messages,
                    output_schema,
                )
                ptc = self._parallel_tool_calls
                extra = {}
                # reasoning ("o*") models do not accept parallel_tool_calls
                if ptc is not None and not self._model.startswith("o"):
                    extra["parallel_tool_calls"] = ptc

                if output_schema is not None:
                    extra["response_format"] = {
                        "type": "json_schema",
                        "json_schema": {
                            "name": response_name,
                            "schema": response_schema,
                            "strict": True,
                        },
                    }

                response: ChatCompletion = await openai.chat.completions.create(
                    n=1,
                    model=self._model,
                    messages=context.messages,
                    tools=open_ai_tools,
                    **extra,
                )
                message = response.choices[0].message
                room.developer.log_nowait(
                    type="llm.message",
                    data={
                        "context": context.id,
                        "participant_id": room.local_participant.id,
                        "participant_name": room.local_participant.get_attribute(
                            "name"
                        ),
                        "message": message.to_dict(),
                    },
                )
                context.messages.append(message)

                if message.tool_calls is not None:
                    tasks = []

                    async def do_tool_call(tool_call: ChatCompletionMessageToolCall):
                        # Run a single tool call; on failure return an error
                        # message so the model can recover instead of crashing.
                        try:
                            tool_context = ToolContext(
                                room=room,
                                caller=room.local_participant,
                                # NOTE(review): this passes the bound method,
                                # not its result — confirm whether to_json()
                                # should be called here.
                                caller_context={"chat": context.to_json},
                            )
                            tool_response = await tool_bundle.execute(
                                context=tool_context, tool_call=tool_call
                            )
                            logger.info("tool response %s", tool_response)
                            return await tool_adapter.create_messages(
                                context=context,
                                tool_call=tool_call,
                                room=room,
                                response=tool_response,
                            )

                        except Exception as e:
                            logger.error(
                                f"unable to complete tool call {tool_call}", exc_info=e
                            )
                            room.developer.log_nowait(
                                type="llm.error",
                                data={
                                    "participant_id": room.local_participant.id,
                                    "participant_name": room.local_participant.get_attribute(
                                        "name"
                                    ),
                                    "error": f"{e}",
                                },
                            )

                            return [
                                {
                                    "role": "tool",
                                    "content": json.dumps(
                                        {"error": f"unable to complete tool call: {e}"}
                                    ),
                                    "tool_call_id": tool_call.id,
                                }
                            ]

                    for tool_call in message.tool_calls:
                        tasks.append(asyncio.create_task(do_tool_call(tool_call)))

                    results = await asyncio.gather(*tasks)

                    for result in results:
                        if result is not None:
                            room.developer.log_nowait(
                                type="llm.message",
                                data={
                                    "context": context.id,
                                    "participant_id": room.local_participant.id,
                                    "participant_name": room.local_participant.get_attribute(
                                        "name"
                                    ),
                                    "message": result,
                                },
                            )
                            # Fix: each result is a *list* of message dicts;
                            # previously the list itself was appended, which
                            # would send a nested list to the API.
                            context.messages.extend(result)

                elif message.content is not None:
                    content = message.content

                    logger.info("RESPONSE FROM OPENAI %s", content)
                    if response_schema is None:
                        return content

                    # First try to parse the result
                    try:
                        full_response = json.loads(content)
                    except json.decoder.JSONDecodeError:
                        # sometimes open ai packs two JSON chunks separated by
                        # a newline; parse line by line, keeping the last chunk
                        full_response = None
                        parsed = False
                        for part in content.splitlines():
                            if len(part.strip()) > 0:
                                full_response = json.loads(part)
                                parsed = True
                        # Fix: previously full_response could be left unbound
                        # when content was empty or whitespace-only.
                        if not parsed:
                            raise RoomException(
                                "Unparseable response from OpenAI"
                            )

                    try:
                        self.validate(
                            response=full_response,
                            output_schema=response_schema,
                        )
                    except Exception as e:
                        logger.error(
                            "received invalid response, retrying",
                            exc_info=e,
                        )
                        error = {
                            "role": "user",
                            "content": "encountered a validation error with the output: {error}".format(
                                error=e
                            ),
                        }
                        room.developer.log_nowait(
                            type="llm.message",
                            data={
                                "context": context.id,
                                "participant_id": room.local_participant.id,
                                "participant_name": room.local_participant.get_attribute(
                                    "name"
                                ),
                                "message": error,
                            },
                        )
                        context.messages.append(error)
                        continue

                    return full_response
                else:
                    raise RoomException(
                        "Unexpected response from OpenAI {response}".format(
                            response=message
                        )
                    )
        except APIStatusError as e:
            raise RoomException(f"Error from OpenAI: {e}")