meshagent-openai 0.0.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of meshagent-openai might be problematic. Click here for more details.

@@ -0,0 +1,201 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
@@ -0,0 +1 @@
1
+ include version.py
@@ -0,0 +1,25 @@
1
+ Metadata-Version: 2.2
2
+ Name: meshagent-openai
3
+ Version: 0.0.1
4
+ Summary: OpenAI Building Blocks for Meshagent
5
+ Home-page:
6
+ License: Apache License 2.0
7
+ Project-URL: Documentation, https://meshagent.com
8
+ Project-URL: Website, https://meshagent.com
9
+ Project-URL: Source, https://github.com/meshagent
10
+ Requires-Python: >=3.9.0
11
+ Description-Content-Type: text/markdown
12
+ License-File: LICENSE
13
+ Requires-Dist: pyjwt>=2.0.0
14
+ Requires-Dist: pytest>=8.3.4
15
+ Requires-Dist: pytest-asyncio>=0.24.0
16
+ Requires-Dist: openai>=1.66.2
17
+ Requires-Dist: meshagent-api>=0.0.1
18
+ Requires-Dist: meshagent-agents>=0.0.1
19
+ Requires-Dist: meshagent-tools>=0.0.1
20
+ Dynamic: description-content-type
21
+ Dynamic: license
22
+ Dynamic: project-url
23
+ Dynamic: requires-dist
24
+ Dynamic: requires-python
25
+ Dynamic: summary
File without changes
@@ -0,0 +1 @@
1
+ from .tools import OpenAICompletionsAdapter, OpenAIResponsesAdapter, OpenAICompletionsToolResponseAdapter, OpenAIResponsesToolResponseAdapter
@@ -0,0 +1,2 @@
1
+ from .responses_adapter import OpenAIResponsesAdapter, OpenAIResponsesToolResponseAdapter
2
+ from .completions_adapter import OpenAICompletionsAdapter, OpenAICompletionsToolResponseAdapter
@@ -0,0 +1,390 @@
1
+
2
+ from meshagent.agents.agent import Agent, AgentChatContext, AgentCallContext
3
+ from meshagent.api import WebSocketClientProtocol, RoomClient, RoomException
4
+ from meshagent.tools.blob import Blob, BlobStorage
5
+ from meshagent.tools import Toolkit, ToolContext, Tool
6
+ from meshagent.api.messaging import Response, LinkResponse, FileResponse, JsonResponse, TextResponse, EmptyResponse
7
+ from meshagent.agents.schema import prompt_schema
8
+ from meshagent.agents.adapter import ToolResponseAdapter, LLMAdapter
9
+ from uuid import uuid4
10
+ import json
11
+ from jsonschema import validate
12
+ from typing import List, Dict
13
+
14
+ from openai import AsyncOpenAI, APIStatusError
15
+ from openai.types.chat import ChatCompletion, ChatCompletionMessage, ChatCompletionMessageToolCall
16
+
17
+ from copy import deepcopy
18
+ from abc import abstractmethod, ABC
19
+ import os
20
+ import jsonschema
21
+ from typing import Optional, Any
22
+
23
+ import logging
24
+ import re
25
+ import asyncio
26
+
27
+ logging.basicConfig()
28
+ logger = logging.getLogger("openai_agent")
29
+ logger.setLevel(logging.INFO)
30
+
31
+
32
+
33
+
34
+ def _replace_non_matching(text: str, allowed_chars: str, replacement: str) -> str:
35
+ """
36
+ Replaces every character in `text` that does not match the given
37
+ `allowed_chars` regex set with `replacement`.
38
+
39
+ Parameters:
40
+ -----------
41
+ text : str
42
+ The input string on which the replacement is to be done.
43
+ allowed_chars : str
44
+ A string defining the set of allowed characters (part of a character set).
45
+ For example, "a-zA-Z0-9" will keep only letters and digits.
46
+ replacement : str
47
+ The string to replace non-matching characters with.
48
+
49
+ Returns:
50
+ --------
51
+ str
52
+ A new string where all characters not in `allowed_chars` are replaced.
53
+ """
54
+ # Build a regex that matches any character NOT in allowed_chars
55
+ pattern = rf"[^{allowed_chars}]"
56
+ return re.sub(pattern, replacement, text)
57
+
58
def safe_tool_name(name: str):
    """Map *name* onto the alphabet OpenAI accepts for tool names.

    Any character other than an ASCII letter, digit, underscore, or hyphen
    is replaced with ``"_"``.
    """
    # Inlined equivalent of _replace_non_matching(name, "a-zA-Z0-9_-", "_").
    return re.sub(r"[^a-zA-Z0-9_-]", "_", name)
60
+
61
# Collects a group of tool proxies and manages execution of openai tool calls
class CompletionsToolBundle:
    """Flattens a list of toolkits into OpenAI chat-completions tool schemas
    and routes incoming tool calls back to the toolkit that owns each tool.
    """

    def __init__(self, toolkits: List[Toolkit]):
        self._toolkits = toolkits
        # original (unsanitized) tool name -> owning toolkit
        self._executors: Dict[str, Toolkit] = {}
        # sanitized name (what OpenAI sees) -> original tool name
        self._safe_names: Dict[str, str] = {}

        open_ai_tools = []

        for toolkit in toolkits:
            for tool in toolkit.tools:
                original_name = tool.name
                name = safe_tool_name(original_name)

                if original_name in self._executors:
                    raise Exception(f"duplicate in bundle '{original_name}', tool names must be unique.")

                # Two distinct names may collide after sanitization; refuse
                # rather than silently shadowing one of them.
                if name in self._safe_names:
                    raise Exception(f"duplicate in bundle '{name}', tool names must be unique.")

                self._executors[original_name] = toolkit
                self._safe_names[name] = original_name

                fn = {
                    "name": name,
                    "parameters": {
                        **tool.input_schema,
                    },
                    "strict": True,
                }

                if tool.defs is not None:
                    fn["parameters"]["$defs"] = tool.defs

                open_ai_tools.append({
                    "type": "function",
                    "function": fn,
                })

        # The completions API expects None (not []) when there are no tools.
        self._open_ai_tools = open_ai_tools if open_ai_tools else None

    async def execute(self, *, context: ToolContext, tool_call: ChatCompletionMessageToolCall) -> Response:
        """Execute a single OpenAI tool call and return the toolkit's response.

        Raises RoomException when the call names an unknown tool; re-raises
        any failure from the underlying toolkit after logging it.
        """
        name = tool_call.function.name
        try:
            arguments = json.loads(tool_call.function.arguments)

            if name not in self._safe_names:
                raise RoomException(f"Invalid tool name {name}, check the name of the tool")

            name = self._safe_names[name]

            if name not in self._executors:
                raise Exception(f"Unregistered tool name {name}")

            logger.info("executing %s %s %s", tool_call.id, name, arguments)

            proxy = self._executors[name]
            result = await proxy.execute(context=context, name=name, arguments=arguments)
            logger.info("success calling %s %s %s", tool_call.id, name, result)
            return result

        except Exception as e:
            logger.error("failed calling %s %s", tool_call.id, name, exc_info=e)
            raise

    def contains(self, name: str) -> bool:
        """Return True when *name* is a sanitized tool name known to this bundle."""
        # BUG FIX: previously tested membership against the list of tool-schema
        # dicts (always False, and a TypeError when the list was None) instead
        # of the sanitized-name table.
        return name in self._safe_names

    def to_json(self) -> List[dict] | None:
        """Return a copy of the OpenAI tool schemas, or None when no tools exist."""
        if self._open_ai_tools is None:
            return None
        return self._open_ai_tools.copy()
143
+
144
+
145
# Converts a tool response into a series of messages that can be inserted into the openai context
class OpenAICompletionsToolResponseAdapter(ToolResponseAdapter):
    """Translates meshagent tool responses into chat-completions "tool" messages."""

    def __init__(self, blob_storage: Optional[BlobStorage] = None):
        # Optional storage used to persist FileResponse payloads as blobs.
        self._blob_storage = blob_storage

    async def to_plain_text(self, *, room: RoomClient, response: Response) -> str:
        """Render *response* as plain text suitable for a tool message body.

        Raises an exception for response types this adapter does not handle.
        """
        if isinstance(response, LinkResponse):
            return json.dumps({
                "name": response.name,
                "url": response.url,
            })

        elif isinstance(response, JsonResponse):
            return json.dumps(response.json)

        elif isinstance(response, TextResponse):
            return response.text

        elif isinstance(response, FileResponse):
            # BUG FIX: previously dereferenced self._blob_storage without
            # checking it was configured, raising an opaque AttributeError.
            if self._blob_storage is None:
                raise RoomException("a blob storage is required to handle file responses")

            blob = Blob(mime_type=response.mime_type, data=response.data)
            uri = self._blob_storage.store(blob=blob)

            return f"The results have been written to a blob with the uri {uri} with the mime type {blob.mime_type}."

        elif isinstance(response, EmptyResponse):
            return "ok"

        elif isinstance(response, dict):
            return json.dumps(response)

        elif isinstance(response, str):
            return response

        elif response is None:
            return "ok"

        else:
            raise Exception("unexpected return type: {type}".format(type=type(response)))

    async def append_messages(self, *, context: AgentChatContext, tool_call: Any, room: RoomClient, response: Response) -> list:
        """Append a "tool" role message carrying the rendered response to *context*.

        NOTE(review): declared to return a list but actually returns None;
        callers in this package ignore the return value — confirm before
        tightening the annotation.
        """
        message = {
            "role": "tool",
            "content": await self.to_plain_text(room=room, response=response),
            "tool_call_id": tool_call.id,
        }

        # Mirror every context mutation to the developer log for debugging.
        room.developer.log_nowait(type="llm.message", data={"context": context.id, "participant_id": room.local_participant.id, "participant_name": room.local_participant.get_attribute("name"), "message": message})

        context.messages.append(message)
213
+
214
+
215
+
216
+
217
class OpenAICompletionsAdapter(LLMAdapter):
    """LLMAdapter backed by the OpenAI chat-completions API.

    When no explicit client is supplied, requests are proxied through the
    meshagent room so they are authenticated with the room token.
    """

    def __init__(self,
        model: Optional[str] = None,
        parallel_tool_calls: Optional[bool] = None,
        client: Optional[AsyncOpenAI] = None,
        reasoning_effort: Optional[str] = None,
    ):
        # BUG FIX: the model default was previously evaluated via os.getenv at
        # import time, freezing the value (possibly None) before the
        # environment could be configured. Resolve it per-instance instead;
        # callers that passed a model explicitly are unaffected.
        if model is None:
            model = os.getenv("OPENAI_MODEL")
        self._model = model
        self._parallel_tool_calls = parallel_tool_calls
        self._client = client
        self._reasoning_effort = reasoning_effort

    def create_chat_context(self):
        """Create a chat context whose system role matches the model family.

        OpenAI reasoning models (o1/o3) use "developer" in place of "system".
        """
        if self._model.startswith(("o1", "o3")):
            system_role = "developer"
        else:
            system_role = "system"

        return AgentChatContext(system_role=system_role)

    # Takes the current chat context, executes a completion request and processes the response.
    # If tool calls are requested, invokes the tools, processes the tool call results, and appends them to the context
    async def next(self,
        *,
        context: AgentChatContext,
        room: RoomClient,
        toolkits: Toolkit,
        tool_adapter: Optional[ToolResponseAdapter] = None,
        output_schema: Optional[dict] = None,
    ):
        """Run the completion loop until the model produces a final answer.

        Returns the raw text content, or the parsed+validated JSON object when
        *output_schema* is given. Raises RoomException on API errors or
        unexpected responses.
        """
        if tool_adapter is None:
            tool_adapter = OpenAICompletionsToolResponseAdapter()

        try:
            if self._client is not None:
                openai = self._client
            else:
                # Proxy through the room: authenticate with the room token and
                # tag requests with this session.
                token: str = room.protocol.token
                url: str = room.room_url

                room_proxy_url = f"{url}/v1"

                openai = AsyncOpenAI(
                    api_key=token,
                    base_url=room_proxy_url,
                    default_headers={
                        "Meshagent-Session": room.session_id
                    }
                )

            tool_bundle = CompletionsToolBundle(toolkits=[
                *toolkits,
            ])
            open_ai_tools = tool_bundle.to_json()

            if open_ai_tools is not None:
                logger.info("OpenAI Tools: %s", json.dumps(open_ai_tools))
            else:
                logger.info("OpenAI Tools: Empty")

            response_schema = output_schema
            response_name = "response"

            while True:
                logger.info("model: %s, context: %s, output_schema: %s", self._model, context.messages, output_schema)

                extra = {}
                # o-series (reasoning) models reject parallel_tool_calls.
                if self._parallel_tool_calls is not None and not self._model.startswith("o"):
                    extra["parallel_tool_calls"] = self._parallel_tool_calls

                if output_schema is not None:
                    extra["response_format"] = {
                        "type": "json_schema",
                        "json_schema": {
                            "name": response_name,
                            "schema": response_schema,
                            "strict": True,
                        }
                    }

                if self._reasoning_effort is not None:
                    extra["reasoning_effort"] = self._reasoning_effort

                response: ChatCompletion = await openai.chat.completions.create(
                    n=1,
                    model=self._model,
                    messages=context.messages,
                    tools=open_ai_tools,
                    **extra
                )
                message = response.choices[0].message
                room.developer.log_nowait(type="llm.message", data={"context": context.id, "participant_id": room.local_participant.id, "participant_name": room.local_participant.get_attribute("name"), "message": message.to_dict()})
                context.messages.append(message)

                if message.tool_calls is not None:

                    async def do_tool_call(tool_call: ChatCompletionMessageToolCall):
                        # Run one tool call; on failure, return an error message
                        # so the model can see the failure and recover.
                        try:
                            tool_context = ToolContext(
                                room=room,
                                caller=room.local_participant,
                            )
                            tool_response = await tool_bundle.execute(context=tool_context, tool_call=tool_call)
                            logger.info(f"tool response {tool_response}")
                            await tool_adapter.append_messages(context=context, tool_call=tool_call, room=room, response=tool_response)
                        except Exception as e:
                            logger.error(f"unable to complete tool call {tool_call}", exc_info=e)
                            room.developer.log_nowait(type="llm.error", data={"participant_id": room.local_participant.id, "participant_name": room.local_participant.get_attribute("name"), "error": f"{e}"})

                            return {
                                "role": "tool",
                                "content": json.dumps({"error": f"unable to complete tool call: {e}"}),
                                "tool_call_id": tool_call.id,
                            }

                    tasks = [asyncio.create_task(do_tool_call(tc)) for tc in message.tool_calls]
                    results = await asyncio.gather(*tasks)

                    for result in results:
                        if result is not None:
                            room.developer.log_nowait(type="llm.message", data={"context": context.id, "participant_id": room.local_participant.id, "participant_name": room.local_participant.get_attribute("name"), "message": result})
                            context.messages.append(result)

                elif message.content is not None:
                    content = message.content

                    logger.info("RESPONSE FROM OPENAI %s", content)
                    if response_schema is None:
                        return content

                    # First try to parse the result
                    try:
                        full_response = json.loads(content)
                    except json.decoder.JSONDecodeError:
                        # Sometimes OpenAI packs multiple JSON chunks separated
                        # by newlines. BUG FIX: stop at the first line that
                        # parses instead of overwriting full_response on every
                        # line (which also raised on any non-JSON line and could
                        # leave full_response unbound for empty content).
                        full_response = None
                        for part in content.splitlines():
                            part = part.strip()
                            if part:
                                try:
                                    full_response = json.loads(part)
                                    break
                                except json.decoder.JSONDecodeError:
                                    continue
                        if full_response is None:
                            raise RoomException(f"Unable to parse response from OpenAI: {content}")

                    try:
                        self.validate(response=full_response, output_schema=response_schema)
                    except Exception as e:
                        logger.error("recieved invalid response, retrying", exc_info=e)
                        error = {"role": "user", "content": "encountered a validation error with the output: {error}".format(error=e)}
                        room.developer.log_nowait(type="llm.message", data={"context": context.id, "participant_id": room.local_participant.id, "participant_name": room.local_participant.get_attribute("name"), "message": error})
                        context.messages.append(error)
                        continue

                    return full_response
                else:
                    raise RoomException("Unexpected response from OpenAI {response}".format(response=message))
        except APIStatusError as e:
            raise RoomException(f"Error from OpenAI: {e}")
387
+
388
+
389
+
390
+
@@ -0,0 +1,388 @@
1
+
2
+ from meshagent.agents.agent import Agent, AgentChatContext, AgentCallContext
3
+ from meshagent.api import WebSocketClientProtocol, RoomClient, RoomException
4
+ from meshagent.tools.blob import Blob, BlobStorage
5
+ from meshagent.tools import Toolkit, ToolContext, Tool
6
+ from meshagent.api.messaging import Response, LinkResponse, FileResponse, JsonResponse, TextResponse, EmptyResponse
7
+ from meshagent.agents.schema import prompt_schema
8
+ from meshagent.agents.adapter import ToolResponseAdapter, LLMAdapter
9
+ from uuid import uuid4
10
+ import json
11
+ from jsonschema import validate
12
+ from typing import List, Dict
13
+
14
+ from openai import AsyncOpenAI, APIStatusError, NOT_GIVEN
15
+ from openai.types.chat import ChatCompletionMessageToolCall
16
+ from openai.types.responses import ResponseFunctionToolCall
17
+
18
+ from copy import deepcopy
19
+ from abc import abstractmethod, ABC
20
+ import os
21
+ import jsonschema
22
+ from typing import Optional, Any
23
+
24
+ import logging
25
+ import re
26
+ import asyncio
27
+
28
+ logging.basicConfig()
29
+ logger = logging.getLogger("openai_agent")
30
+ logger.setLevel(logging.INFO)
31
+
32
+
33
+
34
+
35
+ def _replace_non_matching(text: str, allowed_chars: str, replacement: str) -> str:
36
+ """
37
+ Replaces every character in `text` that does not match the given
38
+ `allowed_chars` regex set with `replacement`.
39
+
40
+ Parameters:
41
+ -----------
42
+ text : str
43
+ The input string on which the replacement is to be done.
44
+ allowed_chars : str
45
+ A string defining the set of allowed characters (part of a character set).
46
+ For example, "a-zA-Z0-9" will keep only letters and digits.
47
+ replacement : str
48
+ The string to replace non-matching characters with.
49
+
50
+ Returns:
51
+ --------
52
+ str
53
+ A new string where all characters not in `allowed_chars` are replaced.
54
+ """
55
+ # Build a regex that matches any character NOT in allowed_chars
56
+ pattern = rf"[^{allowed_chars}]"
57
+ return re.sub(pattern, replacement, text)
58
+
59
def safe_tool_name(name: str):
    """Sanitize *name* for the OpenAI API: keep letters, digits, '_' and '-',
    replacing everything else with an underscore."""
    # Inlined equivalent of _replace_non_matching(name, "a-zA-Z0-9_-", "_").
    return re.sub(r"[^a-zA-Z0-9_-]", "_", name)
61
+
62
# Collects a group of tool proxies and manages execution of openai tool calls
class ResponsesToolBundle:
    """Flattens a list of toolkits into OpenAI responses-API function schemas
    and routes incoming function calls back to the toolkit that owns each tool.
    """

    def __init__(self, toolkits: List[Toolkit]):
        self._toolkits = toolkits
        # original (unsanitized) tool name -> owning toolkit
        self._executors: Dict[str, Toolkit] = {}
        # sanitized name (what OpenAI sees) -> original tool name
        self._safe_names: Dict[str, str] = {}

        open_ai_tools = []

        for toolkit in toolkits:
            for tool in toolkit.tools:
                original_name = tool.name
                name = safe_tool_name(original_name)

                if original_name in self._executors:
                    raise Exception(f"duplicate in bundle '{original_name}', tool names must be unique.")

                # Two distinct names may collide after sanitization; refuse
                # rather than silently shadowing one of them.
                if name in self._safe_names:
                    raise Exception(f"duplicate in bundle '{name}', tool names must be unique.")

                self._executors[original_name] = toolkit
                self._safe_names[name] = original_name

                # The responses API uses a flat function schema (no nested
                # "function" wrapper, unlike chat completions).
                fn = {
                    "type": "function",
                    "name": name,
                    "description": tool.description,
                    "parameters": {
                        **tool.input_schema,
                    },
                    "strict": True,
                }

                if tool.defs is not None:
                    fn["parameters"]["$defs"] = tool.defs

                open_ai_tools.append(fn)

        # The API expects None (not []) when there are no tools.
        self._open_ai_tools = open_ai_tools if open_ai_tools else None

    async def execute(self, *, context: ToolContext, tool_call: ResponseFunctionToolCall) -> Response:
        """Execute a single OpenAI function call and return the toolkit's response.

        Raises RoomException when the call names an unknown tool; re-raises
        any failure from the underlying toolkit after logging it.
        """
        name = tool_call.name
        try:
            arguments = json.loads(tool_call.arguments)

            if name not in self._safe_names:
                raise RoomException(f"Invalid tool name {name}, check the name of the tool")

            name = self._safe_names[name]

            if name not in self._executors:
                raise Exception(f"Unregistered tool name {name}")

            logger.info("executing %s %s %s", tool_call.id, name, arguments)

            proxy = self._executors[name]
            result = await proxy.execute(context=context, name=name, arguments=arguments)
            logger.info("success calling %s %s %s", tool_call.id, name, result)
            return result

        except Exception as e:
            logger.error("failed calling %s %s", tool_call.id, name, exc_info=e)
            raise

    def contains(self, name: str) -> bool:
        """Return True when *name* is a sanitized tool name known to this bundle."""
        # BUG FIX: previously tested membership against the list of tool-schema
        # dicts (always False, and a TypeError when the list was None) instead
        # of the sanitized-name table.
        return name in self._safe_names

    def to_json(self) -> List[dict] | None:
        """Return a copy of the OpenAI tool schemas, or None when no tools exist."""
        if self._open_ai_tools is None:
            return None
        return self._open_ai_tools.copy()
138
+
139
+
140
# Converts a tool response into a series of messages that can be inserted into the openai context
class OpenAIResponsesToolResponseAdapter(ToolResponseAdapter):
    """Translates meshagent tool responses into responses-API
    "function_call_output" items."""

    def __init__(self, blob_storage: Optional[BlobStorage] = None):
        # Optional storage used to persist FileResponse payloads as blobs.
        self._blob_storage = blob_storage

    async def to_plain_text(self, *, room: RoomClient, response: Response) -> str:
        """Render *response* as plain text suitable for a function call output.

        Raises an exception for response types this adapter does not handle.
        """
        if isinstance(response, LinkResponse):
            return json.dumps({
                "name": response.name,
                "url": response.url,
            })

        elif isinstance(response, JsonResponse):
            return json.dumps(response.json)

        elif isinstance(response, TextResponse):
            return response.text

        elif isinstance(response, FileResponse):
            # BUG FIX: previously dereferenced self._blob_storage without
            # checking it was configured, raising an opaque AttributeError.
            if self._blob_storage is None:
                raise RoomException("a blob storage is required to handle file responses")

            blob = Blob(mime_type=response.mime_type, data=response.data)
            uri = self._blob_storage.store(blob=blob)

            return f"The results have been written to a blob with the uri {uri} with the mime type {blob.mime_type}."

        elif isinstance(response, EmptyResponse):
            return "ok"

        elif isinstance(response, dict):
            return json.dumps(response)

        elif isinstance(response, str):
            return response

        elif response is None:
            return "ok"

        else:
            raise Exception("unexpected return type: {type}".format(type=type(response)))

    async def append_messages(self, *, context: AgentChatContext, tool_call: ResponseFunctionToolCall, room: RoomClient, response: Response) -> list:
        """Append a "function_call_output" item carrying the rendered response
        to *context*.

        NOTE(review): declared to return a list but actually returns None;
        callers in this package ignore the return value — confirm before
        tightening the annotation.
        """
        message = {
            "output": await self.to_plain_text(room=room, response=response),
            # The responses API correlates outputs by call_id, not by item id.
            "call_id": tool_call.call_id,
            "type": "function_call_output"
        }

        # Mirror every context mutation to the developer log for debugging.
        room.developer.log_nowait(type="llm.message", data={"context": context.id, "participant_id": room.local_participant.id, "participant_name": room.local_participant.get_attribute("name"), "message": message})

        context.messages.append(message)
208
+
209
+
210
+
211
+
212
class OpenAIResponsesAdapter(LLMAdapter):
    """LLMAdapter backed by the OpenAI "responses" API.

    Drives a completion loop against ``openai.responses.create``: executes
    requested tool calls via a ``ResponsesToolBundle``, appends tool results
    back into the chat context, and (optionally) validates structured output
    against a JSON schema, retrying on validation failure.
    """

    def __init__(self,
        # NOTE(review): default is read from the environment once, at import
        # time, and may be None — startswith() below would then raise; confirm
        # callers always provide a model or set OPENAI_MODEL.
        model: str = os.getenv("OPENAI_MODEL"),
        parallel_tool_calls : Optional[bool] = None,
        client: Optional[AsyncOpenAI] = None,
        reasoning_effort: Optional[str] = None,
    ):
        self._model = model
        self._parallel_tool_calls = parallel_tool_calls
        # When no client is supplied, next() builds one that proxies through
        # the room (see below).
        self._client = client
        self._reasoning_effort = reasoning_effort


    def create_chat_context(self):
        """Create a chat context whose system role matches the model family.

        o1/o3 reasoning models use the "developer" role instead of "system".
        """
        system_role = "system"
        if self._model.startswith("o1"):
            system_role = "developer"
        elif self._model.startswith("o3"):
            system_role = "developer"

        context = AgentChatContext(
            system_role=system_role
        )

        return context

    # Takes the current chat context, executes a completion request and processes the response.
    # If tool calls are requested, invokes the tools, processes the tool call results, and appends them to the context
    async def next(self,
        *,
        context: AgentChatContext,
        room: RoomClient,
        toolkits: List[Toolkit],
        tool_adapter: Optional[ToolResponseAdapter] = None,
        output_schema: Optional[dict] = None,
    ):
        """Run the completion loop until the model produces a final answer.

        Returns the raw text when no *output_schema* is given, otherwise the
        parsed (and schema-validated) JSON object. Raises RoomException on
        API errors or unexpected response shapes.
        """
        if tool_adapter == None:
            tool_adapter = OpenAIResponsesToolResponseAdapter()

        try:
            if self._client != None:
                openai = self._client
            else:
                # No explicit client: route OpenAI traffic through the room's
                # proxy endpoint, authenticated with the room token.
                token : str = room.protocol.token
                url : str = room.room_url

                room_proxy_url = f"{url}/v1"

                openai=AsyncOpenAI(
                    api_key=token,
                    base_url=room_proxy_url,
                    default_headers={
                        "Meshagent-Session" : room.session_id
                    }
                )

            tool_bundle = ResponsesToolBundle(toolkits=[
                *toolkits,
            ])
            open_ai_tools = tool_bundle.to_json()

            if open_ai_tools != None:
                logger.info("OpenAI Tools: %s", json.dumps(open_ai_tools))
            else:
                logger.info("OpenAI Tools: Empty")

            response_schema = output_schema
            response_name = "response"

            # Loop until the last message in the context came from the
            # assistant (or return/raise out of the loop body).
            # NOTE(review): entries appended from response.output are SDK
            # objects, not dicts — confirm they expose .get, or that a dict
            # (tool result) is always the last element when this re-evaluates.
            while context.messages[-1].get("role") != "assistant" if context.messages else True:
                logger.info("model: %s, context: %s, output_schema: %s", self._model, context.messages, output_schema)
                ptc = self._parallel_tool_calls
                extra = {}
                if ptc != None and self._model.startswith("o") == False:
                    extra["parallel_tool_calls"] = ptc
                # NOTE(review): `extra` is built but never forwarded to
                # openai.responses.create below, so parallel_tool_calls is
                # silently ignored — confirm and pass **extra if intended.

                # Request strict structured output when a schema was supplied.
                text = NOT_GIVEN
                if output_schema != None:
                    text = {
                        "format" : {
                            "type" : "json_schema",
                            "name" : response_name,
                            "schema" : response_schema,
                            "strict" : True,
                        }
                    }

                reasoning = NOT_GIVEN
                if self._reasoning_effort != None:
                    reasoning = {
                        "effort" : self._reasoning_effort
                    }

                response = await openai.responses.create(
                    model = self._model,
                    input = context.messages,
                    tools = open_ai_tools,
                    text = text,
                    reasoning=reasoning,
                )

                room.developer.log_nowait(type="llm.message", data={ "context" : context.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "response" : response.to_dict() })

                for message in response.output:
                    context.messages.append(message)

                    if message.type == "function_call":

                        tasks = []

                        async def do_tool_call(tool_call: ResponseFunctionToolCall):
                            # Execute one tool call; on success the adapter
                            # appends the output message itself (returns None),
                            # on failure return an error output message instead.
                            try:
                                tool_context = ToolContext(
                                    room=room,
                                    caller=room.local_participant,
                                )
                                tool_response = await tool_bundle.execute(context=tool_context, tool_call=tool_call)
                                logger.info(f"tool response {tool_response}")
                                await tool_adapter.append_messages(context=context, tool_call=tool_call, room=room, response=tool_response)
                            except Exception as e:
                                logger.error(f"unable to complete tool call {tool_call}", exc_info=e)
                                room.developer.log_nowait(type="llm.error", data={ "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "error" : f"{e}" })

                                return {
                                    "output" : json.dumps({"error":f"unable to complete tool call: {e}"}),
                                    "call_id" : tool_call.call_id,
                                    "type" : "function_call_output"
                                }


                        tasks.append(asyncio.create_task(do_tool_call(message)))

                        results = await asyncio.gather(*tasks)

                        # Only failed calls return a result dict here; successes
                        # already appended their output inside do_tool_call.
                        for result in results:
                            if result != None:
                                room.developer.log_nowait(type="llm.message", data={ "context" : context.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "message" : result })
                                context.messages.append(result)

                    elif response.output_text != None:

                        content = response.output_text

                        logger.info("RESPONSE FROM OPENAI %s", content)
                        if response_schema == None:
                            return content

                        # First try to parse the result
                        try:
                            full_response = json.loads(content)
                        # sometimes open ai packs two JSON chunks separated by newline, check if that's why we couldn't parse
                        # NOTE(review): this fallback keeps only the LAST
                        # parseable line, and re-raises if any non-empty line
                        # fails to parse — confirm that is the intent.
                        except json.decoder.JSONDecodeError as e:
                            for part in content.splitlines():
                                if len(part.strip()) > 0:
                                    full_response = json.loads(part)

                        # Validate against the schema; on failure feed the
                        # error back to the model and retry the loop.
                        try:
                            self.validate(response=full_response, output_schema=response_schema)
                        except Exception as e:
                            logger.error("recieved invalid response, retrying", exc_info=e)
                            error = { "role" : "user", "content" : "encountered a validation error with the output: {error}".format(error=e)}
                            room.developer.log_nowait(type="llm.message", data={ "context" : context.id, "participant_id" : room.local_participant.id, "participant_name" : room.local_participant.get_attribute("name"), "message" : error })
                            context.messages.append(error)
                            continue

                        return full_response
                    else:
                        raise RoomException("Unexpected response from OpenAI {response}".format(response=message))
        except APIStatusError as e:
            # Surface API failures as room-level exceptions for callers.
            raise RoomException(f"Error from OpenAI: {e}")
@@ -0,0 +1,25 @@
1
+ Metadata-Version: 2.2
2
+ Name: meshagent-openai
3
+ Version: 0.0.1
4
+ Summary: OpenAI Building Blocks for Meshagent
5
+ Home-page:
6
+ License: Apache License 2.0
7
+ Project-URL: Documentation, https://meshagent.com
8
+ Project-URL: Website, https://meshagent.com
9
+ Project-URL: Source, https://github.com/meshagent
10
+ Requires-Python: >=3.9.0
11
+ Description-Content-Type: text/markdown
12
+ License-File: LICENSE
13
+ Requires-Dist: pyjwt>=2.0.0
14
+ Requires-Dist: pytest>=8.3.4
15
+ Requires-Dist: pytest-asyncio>=0.24.0
16
+ Requires-Dist: openai>=1.66.2
17
+ Requires-Dist: meshagent-api>=0.0.1
18
+ Requires-Dist: meshagent-agents>=0.0.1
19
+ Requires-Dist: meshagent-tools>=0.0.1
20
+ Dynamic: description-content-type
21
+ Dynamic: license
22
+ Dynamic: project-url
23
+ Dynamic: requires-dist
24
+ Dynamic: requires-python
25
+ Dynamic: summary
@@ -0,0 +1,15 @@
1
+ LICENSE
2
+ MANIFEST.in
3
+ README.md
4
+ pyproject.toml
5
+ setup.py
6
+ version.py
7
+ meshagent/openai/__init__.py
8
+ meshagent/openai/tools/__init__.py
9
+ meshagent/openai/tools/completions_adapter.py
10
+ meshagent/openai/tools/responses_adapter.py
11
+ meshagent_openai.egg-info/PKG-INFO
12
+ meshagent_openai.egg-info/SOURCES.txt
13
+ meshagent_openai.egg-info/dependency_links.txt
14
+ meshagent_openai.egg-info/requires.txt
15
+ meshagent_openai.egg-info/top_level.txt
@@ -0,0 +1,7 @@
1
+ pyjwt>=2.0.0
2
+ pytest>=8.3.4
3
+ pytest-asyncio>=0.24.0
4
+ openai>=1.66.2
5
+ meshagent-api>=0.0.1
6
+ meshagent-agents>=0.0.1
7
+ meshagent-tools>=0.0.1
@@ -0,0 +1,5 @@
1
+ [build-system]
2
+ requires = [
3
+ "setuptools>=42",
4
+ ]
5
+ build-backend = "setuptools.build_meta"
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
@@ -0,0 +1,44 @@
1
"""Packaging script for the meshagent-openai distribution."""

import os
import pathlib
from typing import Any, Dict

import setuptools  # type: ignore

# Resolve the project root so file lookups work regardless of the cwd
# setup.py is invoked from.
_ROOT = pathlib.Path(__file__).parent.resolve()

# Pull __version__ out of version.py without importing the package itself.
_version_ns: Dict[Any, Any] = {}
with open(os.path.join(_ROOT, "version.py"), "r") as fh:
    exec(fh.read(), _version_ns)

setuptools.setup(
    name="meshagent-openai",
    version=_version_ns["__version__"],
    description="OpenAI Building Blocks for Meshagent",
    long_description=(_ROOT / "README.md").read_text(encoding="utf-8"),
    long_description_content_type="text/markdown",
    url="",
    classifiers=[],
    keywords=[],
    license="Apache License 2.0",
    # Namespace packages: pick up everything under the meshagent.* namespace.
    packages=setuptools.find_namespace_packages(include=["meshagent.*"]),
    python_requires=">=3.9.0",
    install_requires=[
        "pyjwt>=2.0.0",
        "pytest>=8.3.4",
        "pytest-asyncio>=0.24.0",
        "openai>=1.66.2",
        "meshagent-api>=0.0.1",
        "meshagent-agents>=0.0.1",
        "meshagent-tools>=0.0.1",
    ],
    package_data={
        "meshagent.openai": ["py.typed", "*.pyi", "**/*.pyi", "**/*.js"],
    },
    project_urls={
        "Documentation": "https://meshagent.com",
        "Website": "https://meshagent.com",
        "Source": "https://github.com/meshagent",
    },
)
@@ -0,0 +1 @@
# Single source of truth for the package version; exec'd by setup.py.
__version__ = "0.0.1"