meshagent-openai 0.5.15__tar.gz → 0.6.0__tar.gz

This diff compares the contents of two publicly released versions of the package, as published to their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of meshagent-openai might be problematic.

Files changed (24)
  1. {meshagent_openai-0.5.15 → meshagent_openai-0.6.0}/CHANGELOG.md +15 -0
  2. {meshagent_openai-0.5.15/meshagent_openai.egg-info → meshagent_openai-0.6.0}/PKG-INFO +6 -6
  3. {meshagent_openai-0.5.15 → meshagent_openai-0.6.0}/meshagent/openai/proxy/proxy.py +11 -1
  4. {meshagent_openai-0.5.15 → meshagent_openai-0.6.0}/meshagent/openai/tools/completions_adapter.py +6 -9
  5. {meshagent_openai-0.5.15 → meshagent_openai-0.6.0}/meshagent/openai/tools/responses_adapter.py +227 -93
  6. meshagent_openai-0.6.0/meshagent/openai/version.py +1 -0
  7. {meshagent_openai-0.5.15 → meshagent_openai-0.6.0/meshagent_openai.egg-info}/PKG-INFO +6 -6
  8. meshagent_openai-0.6.0/meshagent_openai.egg-info/requires.txt +7 -0
  9. {meshagent_openai-0.5.15 → meshagent_openai-0.6.0}/pyproject.toml +5 -5
  10. meshagent_openai-0.5.15/meshagent/openai/version.py +0 -1
  11. meshagent_openai-0.5.15/meshagent_openai.egg-info/requires.txt +0 -7
  12. {meshagent_openai-0.5.15 → meshagent_openai-0.6.0}/LICENSE +0 -0
  13. {meshagent_openai-0.5.15 → meshagent_openai-0.6.0}/MANIFEST.in +0 -0
  14. {meshagent_openai-0.5.15 → meshagent_openai-0.6.0}/README.md +0 -0
  15. {meshagent_openai-0.5.15 → meshagent_openai-0.6.0}/meshagent/openai/__init__.py +0 -0
  16. {meshagent_openai-0.5.15 → meshagent_openai-0.6.0}/meshagent/openai/proxy/__init__.py +0 -0
  17. {meshagent_openai-0.5.15 → meshagent_openai-0.6.0}/meshagent/openai/tools/__init__.py +0 -0
  18. {meshagent_openai-0.5.15 → meshagent_openai-0.6.0}/meshagent/openai/tools/schema.py +0 -0
  19. {meshagent_openai-0.5.15 → meshagent_openai-0.6.0}/meshagent/openai/tools/stt.py +0 -0
  20. {meshagent_openai-0.5.15 → meshagent_openai-0.6.0}/meshagent/openai/tools/stt_test.py +0 -0
  21. {meshagent_openai-0.5.15 → meshagent_openai-0.6.0}/meshagent_openai.egg-info/SOURCES.txt +0 -0
  22. {meshagent_openai-0.5.15 → meshagent_openai-0.6.0}/meshagent_openai.egg-info/dependency_links.txt +0 -0
  23. {meshagent_openai-0.5.15 → meshagent_openai-0.6.0}/meshagent_openai.egg-info/top_level.txt +0 -0
  24. {meshagent_openai-0.5.15 → meshagent_openai-0.6.0}/setup.cfg +0 -0
@@ -1,3 +1,18 @@
+ ## [0.6.0]
+ - Stability
+
+ ## [0.5.19]
+ - Stability
+
+ ## [0.5.18]
+ - Stability
+
+ ## [0.5.17]
+ - Stability
+
+ ## [0.5.16]
+ - Stability
+
  ## [0.5.15]
  - Stability
 
@@ -1,21 +1,21 @@
  Metadata-Version: 2.4
  Name: meshagent-openai
- Version: 0.5.15
+ Version: 0.6.0
  Summary: OpenAI Building Blocks for Meshagent
  License-Expression: Apache-2.0
  Project-URL: Documentation, https://docs.meshagent.com
  Project-URL: Website, https://www.meshagent.com
  Project-URL: Source, https://www.meshagent.com
- Requires-Python: >=3.12
+ Requires-Python: >=3.13
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: pyjwt~=2.10
  Requires-Dist: pytest~=8.4
  Requires-Dist: pytest-asyncio~=0.26
- Requires-Dist: openai~=1.84
- Requires-Dist: meshagent-api~=0.5.15
- Requires-Dist: meshagent-agents~=0.5.15
- Requires-Dist: meshagent-tools~=0.5.15
+ Requires-Dist: openai~=2.6.0
+ Requires-Dist: meshagent-api~=0.6.0
+ Requires-Dist: meshagent-agents~=0.6.0
+ Requires-Dist: meshagent-tools~=0.6.0
  Dynamic: license-file
 
  # [Meshagent](https://www.meshagent.com)
@@ -1,5 +1,8 @@
  from meshagent.api import RoomClient
  from openai import AsyncOpenAI
+ import logging
+
+ logger = logging.getLogger("openai.client")
 
 
  def get_client(*, room: RoomClient) -> AsyncOpenAI:
@@ -8,7 +11,14 @@ def get_client(*, room: RoomClient) -> AsyncOpenAI:
      # when running inside the room pod, the room.room_url currently points to the external url
      # so we need to use url off the protocol (if available).
      # TODO: room_url should be set properly, but may need a claim in the token to be set during call to say it is local
-     url: str = getattr(room.protocol, "url", room.room_url)
+     url = getattr(room.protocol, "url")
+     if url is None:
+         logger.debug(
+             f"protocol does not have url, openai client falling back to room url {room.room_url}"
+         )
+         url = room.room_url
+     else:
+         logger.debug(f"protocol had url, openai client will use {url}")
 
      room_proxy_url = f"{url}/v1"
 
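The new `get_client` logic prefers the protocol's local URL and falls back to `room.room_url` when it is absent. A minimal sketch of the selection behavior (the `FakeProtocol`/`FakeRoom` stand-ins and URLs are illustrative, not part of the package; note the sketch passes a default to `getattr`, whereas the released code omits it, so a protocol object with no `url` attribute at all would raise `AttributeError` rather than fall back):

```python
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("openai.client")

# Hypothetical stand-ins for room.protocol / RoomClient, for illustration only.
class FakeProtocol:
    url = None  # no pod-local URL available in this example

class FakeRoom:
    protocol = FakeProtocol()
    room_url = "https://example.meshagent.com/rooms/demo"

room = FakeRoom()

# Mirrors the fallback: use protocol.url when present, else room.room_url.
url = getattr(room.protocol, "url", None)
if url is None:
    logger.debug("protocol does not have url, falling back to %s", room.room_url)
    url = room.room_url
else:
    logger.debug("protocol had url, openai client will use %s", url)

print(f"{url}/v1")  # -> https://example.meshagent.com/rooms/demo/v1
```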
@@ -1,6 +1,5 @@
  from meshagent.agents.agent import AgentChatContext
- from meshagent.api import RoomClient, RoomException
- from meshagent.tools.blob import Blob, BlobStorage
+ from meshagent.api import RoomClient, RoomException, RemoteParticipant
  from meshagent.tools import Toolkit, ToolContext
  from meshagent.api.messaging import (
      Response,
@@ -135,8 +134,7 @@ class CompletionsToolBundle:
 
  # Converts a tool response into a series of messages that can be inserted into the openai context
  class OpenAICompletionsToolResponseAdapter(ToolResponseAdapter):
-     def __init__(self, blob_storage: Optional[BlobStorage] = None):
-         self._blob_storage = blob_storage
+     def __init__(self):
          pass
 
      async def to_plain_text(self, *, room: RoomClient, response: Response) -> str:
@@ -155,10 +153,7 @@ class OpenAICompletionsToolResponseAdapter(ToolResponseAdapter):
              return response.text
 
          elif isinstance(response, FileResponse):
-             blob = Blob(mime_type=response.mime_type, data=response.data)
-             uri = self._blob_storage.store(blob=blob)
-
-             return f"The results have been written to a blob with the uri {uri} with the mime type {blob.mime_type}."
+             return f"{response.name}"
 
          elif isinstance(response, EmptyResponse):
              return "ok"
@@ -247,17 +242,19 @@ class OpenAICompletionsAdapter(LLMAdapter):
      async def next(
          self,
          *,
+         model: Optional[str] = None,
          context: AgentChatContext,
          room: RoomClient,
          toolkits: Toolkit,
          tool_adapter: Optional[ToolResponseAdapter] = None,
          output_schema: Optional[dict] = None,
+         on_behalf_of: Optional[RemoteParticipant] = None,
      ):
          if tool_adapter is None:
              tool_adapter = OpenAICompletionsToolResponseAdapter()
 
          try:
-             openai = get_client(room=room)
+             openai = self._client if self._client is not None else get_client(room=room)
 
              tool_bundle = CompletionsToolBundle(
                  toolkits=[
@@ -1,6 +1,5 @@
  from meshagent.agents.agent import AgentChatContext
- from meshagent.api import RoomClient, RoomException
- from meshagent.tools.blob import Blob, BlobStorage
+ from meshagent.api import RoomClient, RoomException, RemoteParticipant
  from meshagent.tools import Toolkit, ToolContext, Tool, BaseTool
  from meshagent.api.messaging import (
      Response,
@@ -12,7 +11,12 @@ from meshagent.api.messaging import (
      RawOutputs,
      ensure_response,
  )
- from meshagent.agents.adapter import ToolResponseAdapter, LLMAdapter
+ from meshagent.agents.adapter import (
+     ToolResponseAdapter,
+     LLMAdapter,
+     ToolkitBuilder,
+     ToolkitConfig,
+ )
  import json
  from typing import List, Literal
  from meshagent.openai.proxy import get_client
@@ -165,8 +169,7 @@ class ResponsesToolBundle:
 
  # Converts a tool response into a series of messages that can be inserted into the openai context
  class OpenAIResponsesToolResponseAdapter(ToolResponseAdapter):
-     def __init__(self, blob_storage: Optional[BlobStorage] = None):
-         self._blob_storage = blob_storage
+     def __init__(self):
          pass
 
      async def to_plain_text(self, *, room: RoomClient, response: Response) -> str:
@@ -185,10 +188,7 @@ class OpenAIResponsesToolResponseAdapter(ToolResponseAdapter):
              return response.text
 
          elif isinstance(response, FileResponse):
-             blob = Blob(mime_type=response.mime_type, data=response.data)
-             uri = self._blob_storage.store(blob=blob)
-
-             return f"The results have been written to a blob with the uri {uri} with the mime type {blob.mime_type}."
+             return f"{response.name}"
 
          elif isinstance(response, EmptyResponse):
              return "ok"
@@ -246,39 +246,88 @@ class OpenAIResponsesToolResponseAdapter(ToolResponseAdapter):
                  )
 
                  return response.outputs
+
              else:
                  span.set_attribute("kind", "text")
-                 output = await self.to_plain_text(room=room, response=response)
-                 span.set_attribute("output", output)
+                 if isinstance(response, FileResponse):
+                     if response.mime_type.startswith("image/"):
+                         span.set_attribute(
+                             "output", f"image: {response.name}, {response.mime_type}"
+                         )
 
-                 message = {
-                     "output": output,
-                     "call_id": tool_call.call_id,
-                     "type": "function_call_output",
-                 }
+                         message = {
+                             "output": [
+                                 {
+                                     "type": "input_image",
+                                     "image_url": f"data:{response.mime_type};base64,{base64.b64encode(response.data).decode()}",
+                                 }
+                             ],
+                             "call_id": tool_call.call_id,
+                             "type": "function_call_output",
+                         }
+                     else:
+                         span.set_attribute(
+                             "output", f"file: {response.name}, {response.mime_type}"
+                         )
 
-                 room.developer.log_nowait(
-                     type="llm.message",
-                     data={
-                         "context": context.id,
-                         "participant_id": room.local_participant.id,
-                         "participant_name": room.local_participant.get_attribute(
-                             "name"
-                         ),
-                         "message": message,
-                     },
-                 )
+                         message = {
+                             "output": [
+                                 {
+                                     "type": "input_file",
+                                     "filename": response.name,
+                                     "file_data": f"data:{response.mime_type};base64,{base64.b64encode(response.data).decode()}",
+                                 }
+                             ],
+                             "call_id": tool_call.call_id,
+                             "type": "function_call_output",
+                         }
+
+                     room.developer.log_nowait(
+                         type="llm.message",
+                         data={
+                             "context": context.id,
+                             "participant_id": room.local_participant.id,
+                             "participant_name": room.local_participant.get_attribute(
+                                 "name"
+                             ),
+                             "message": message,
+                         },
+                     )
+
+                     return [message]
+                 else:
+                     output = await self.to_plain_text(room=room, response=response)
+                     span.set_attribute("output", output)
+
+                     message = {
+                         "output": output,
+                         "call_id": tool_call.call_id,
+                         "type": "function_call_output",
+                     }
+
+                     room.developer.log_nowait(
+                         type="llm.message",
+                         data={
+                             "context": context.id,
+                             "participant_id": room.local_participant.id,
+                             "participant_name": room.local_participant.get_attribute(
+                                 "name"
+                             ),
+                             "message": message,
+                         },
+                     )
 
-                 return [message]
+                     return [message]
 
 
- class OpenAIResponsesAdapter(LLMAdapter[ResponsesToolBundle]):
+ class OpenAIResponsesAdapter(LLMAdapter[ResponseStreamEvent]):
      def __init__(
          self,
-         model: str = os.getenv("OPENAI_MODEL", "gpt-4.1"),
+         model: str = os.getenv("OPENAI_MODEL", "gpt-5"),
          parallel_tool_calls: Optional[bool] = None,
          client: Optional[AsyncOpenAI] = None,
          response_options: Optional[dict] = None,
+         reasoning_effort: Optional[str] = None,
          provider: str = "openai",
      ):
          self._model = model
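File and image tool results are now returned to the model inline rather than written to blob storage. A sketch of the `function_call_output` message the adapter now emits for an image result (the payload bytes and call id are illustrative):

```python
import base64

# Illustrative inputs: raw image bytes from a FileResponse and the tool call id.
data = b"\x89PNG\r\n\x1a\n..."
call_id = "call_123"
mime_type = "image/png"

message = {
    "output": [
        {
            "type": "input_image",
            "image_url": f"data:{mime_type};base64,{base64.b64encode(data).decode()}",
        }
    ],
    "call_id": call_id,
    "type": "function_call_output",
}
```

Non-image files take the same shape, with an `input_file` entry carrying `filename` and `file_data` instead of `image_url`.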
@@ -286,6 +335,10 @@ class OpenAIResponsesAdapter(LLMAdapter[ResponsesToolBundle]):
          self._client = client
          self._response_options = response_options
          self._provider = provider
+         self._reasoning_effort = reasoning_effort
+
+     def default_model(self) -> str:
+         return self._model
 
      def create_chat_context(self):
          system_role = "system"
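With the new constructor parameter and the `default_model()` accessor, an adapter can be configured once and queried for its model. A usage sketch, assuming `OpenAIResponsesAdapter` is imported from `meshagent.openai.tools.responses_adapter` (the model and effort values are illustrative):

```python
adapter = OpenAIResponsesAdapter(
    model="gpt-5",              # also the new default via OPENAI_MODEL
    reasoning_effort="medium",  # injects {"reasoning": {"effort": ..., "summary": "detailed"}}
)
assert adapter.default_model() == "gpt-5"
```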
@@ -316,13 +369,18 @@ class OpenAIResponsesAdapter(LLMAdapter[ResponsesToolBundle]):
      async def next(
          self,
          *,
+         model: Optional[str] = None,
          context: AgentChatContext,
          room: RoomClient,
          toolkits: list[Toolkit],
          tool_adapter: Optional[ToolResponseAdapter] = None,
          output_schema: Optional[dict] = None,
          event_handler: Optional[Callable[[ResponseStreamEvent], None]] = None,
+         on_behalf_of: Optional[RemoteParticipant] = None,
      ):
+         if model is None:
+             model = self.default_model()
+
          with tracer.start_as_current_span("llm.turn") as span:
              span.set_attributes({"chat_context": context.id, "api": "responses"})
 
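Callers can now override the model per turn and attribute the request to another participant (the completions adapter gains the same two parameters). A hedged sketch of the call site, assuming `adapter`, `context`, `room`, and `remote_participant` come from an existing Meshagent session:

```python
async def run_turn(adapter, context, room, remote_participant):
    # model=None would fall back to adapter.default_model()
    return await adapter.next(
        model="gpt-5-mini",               # illustrative per-call override
        context=context,
        room=room,
        toolkits=[],
        on_behalf_of=remote_participant,  # sends the Meshagent-On-Behalf-Of header
    )
```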
@@ -333,10 +391,14 @@ class OpenAIResponsesAdapter(LLMAdapter[ResponsesToolBundle]):
          while True:
              with tracer.start_as_current_span("llm.turn.iteration") as span:
                  span.set_attributes(
-                     {"model": self._model, "provider": self._provider}
+                     {"model": model, "provider": self._provider}
                  )
 
-                 openai = get_client(room=room)
+                 openai = (
+                     self._client
+                     if self._client is not None
+                     else get_client(room=room)
+                 )
 
                  response_schema = output_schema
                  response_name = "response"
@@ -355,7 +417,7 @@ class OpenAIResponsesAdapter(LLMAdapter[ResponsesToolBundle]):
 
                  ptc = self._parallel_tool_calls
                  extra = {}
-                 if ptc is not None and not self._model.startswith("o"):
+                 if ptc is not None and not model.startswith("o"):
                      extra["parallel_tool_calls"] = ptc
                      span.set_attribute("parallel_tool_calls", ptc)
                  else:
@@ -382,12 +444,30 @@ class OpenAIResponsesAdapter(LLMAdapter[ResponsesToolBundle]):
                  stream = event_handler is not None
 
                  with tracer.start_as_current_span("llm.invoke") as span:
-                     response_options = self._response_options
+                     response_options = copy.deepcopy(self._response_options)
                      if response_options is None:
                          response_options = {}
+
+                     if self._reasoning_effort is not None:
+                         response_options["reasoning"] = {
+                             "effort": self._reasoning_effort,
+                             "summary": "detailed",
+                         }
+
+                     extra_headers = {}
+                     if on_behalf_of is not None:
+                         on_behalf_of_name = on_behalf_of.get_attribute("name")
+                         logger.info(
+                             f"{room.local_participant.get_attribute('name')} making openai request on behalf of {on_behalf_of_name}"
+                         )
+                         extra_headers["Meshagent-On-Behalf-Of"] = (
+                             on_behalf_of_name
+                         )
+
                      response: Response = await openai.responses.create(
+                         extra_headers=extra_headers,
                          stream=stream,
-                         model=self._model,
+                         model=model,
                          input=context.messages,
                          tools=open_ai_tools,
                          text=text,
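The switch from aliasing `self._response_options` to `copy.deepcopy` matters because the options dict is now mutated per call (the `reasoning` key is injected); without the copy, those mutations would leak back into the adapter's shared defaults. A minimal illustration:

```python
import copy

shared = {"reasoning": {"effort": "low"}}

aliased = shared                      # old behavior: same object
aliased["reasoning"]["effort"] = "high"
print(shared["reasoning"]["effort"])  # "high" -- shared defaults were mutated

shared = {"reasoning": {"effort": "low"}}
copied = copy.deepcopy(shared)        # new behavior: independent copy
copied["reasoning"]["effort"] = "high"
print(shared["reasoning"]["effort"])  # "low" -- defaults intact
```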
@@ -723,7 +803,6 @@ class OpenAIResponsesAdapter(LLMAdapter[ResponsesToolBundle]):
                              "event": safe_model_dump(event),
                          }
                      )
-
                      event_handler(event)
 
                      if event.type == "response.completed":
@@ -811,32 +890,47 @@ class OpenAIResponsesTool(BaseTool):
          return {}
 
 
+ class ImageGenerationConfig(ToolkitConfig):
+     name: Literal["image_generation"]
+     background: Literal["transparent", "opaque", "auto"] = None
+     input_image_mask_url: Optional[str] = None
+     model: Optional[str] = None
+     moderation: Optional[str] = None
+     output_compression: Optional[int] = None
+     output_format: Optional[Literal["png", "webp", "jpeg"]] = None
+     partial_images: Optional[int] = None
+     quality: Optional[Literal["auto", "low", "medium", "high"]] = None
+     size: Optional[Literal["1024x1024", "1024x1536", "1536x1024", "auto"]] = None
+
+
+ class ImageGenerationToolkitBuilder(ToolkitBuilder):
+     def __init__(self):
+         super().__init__(name="image_generation", type=ImageGenerationConfig)
+
+     def make(self, *, model: str, config: ImageGenerationConfig):
+         return Toolkit(
+             name="image_generation", tools=[ImageGenerationTool(config=config)]
+         )
+
+
  class ImageGenerationTool(OpenAIResponsesTool):
      def __init__(
          self,
          *,
-         background: Literal["transparent", "opaque", "auto"] = None,
-         input_image_mask_url: Optional[str] = None,
-         model: Optional[str] = None,
-         moderation: Optional[str] = None,
-         output_compression: Optional[int] = None,
-         output_format: Optional[Literal["png", "webp", "jpeg"]] = None,
-         partial_images: Optional[int] = None,
-         quality: Optional[Literal["auto", "low", "medium", "high"]] = None,
-         size: Optional[Literal["1024x1024", "1024x1536", "1536x1024", "auto"]] = None,
+         config: ImageGenerationConfig,
      ):
          super().__init__(name="image_generation")
-         self.background = background
-         self.input_image_mask_url = input_image_mask_url
-         self.model = model
-         self.moderation = moderation
-         self.output_compression = output_compression
-         self.output_format = output_format
-         if partial_images is None:
-             partial_images = 1  # streaming wants non zero, and we stream by default
-         self.partial_images = partial_images
-         self.quality = quality
-         self.size = size
+         self.background = config.background
+         self.input_image_mask_url = config.input_image_mask_url
+         self.model = config.model
+         self.moderation = config.moderation
+         self.output_compression = config.output_compression
+         self.output_format = config.output_format
+         self.partial_images = (
+             config.partial_images if config.partial_images is not None else 1
+         )
+         self.quality = config.quality
+         self.size = config.size
 
      def get_open_ai_tool_definitions(self):
          opts = {"type": "image_generation"}
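Tool construction is now config-driven: a `ToolkitConfig` subclass carries the options and a `ToolkitBuilder` produces the toolkit. A sketch of the new call pattern (field values are illustrative):

```python
config = ImageGenerationConfig(
    name="image_generation",
    output_format="png",
    quality="high",
    size="1024x1024",
)
toolkit = ImageGenerationToolkitBuilder().make(model="gpt-5", config=config)
```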
@@ -982,8 +1076,20 @@ class ImageGenerationTool(OpenAIResponsesTool):
              )
 
 
- class LocalShellTool(OpenAIResponsesTool):
+ class LocalShellConfig(ToolkitConfig):
+     name: Literal["local_shell"]
+
+
+ class LocalShellToolkitBuilder(ToolkitBuilder):
      def __init__(self):
+         super().__init__(name="local_shell", type=LocalShellConfig)
+
+     def make(self, *, model: str, config: LocalShellConfig):
+         return Toolkit(name="local_shell", tools=[LocalShellTool(config=config)])
+
+
+ class LocalShellTool(OpenAIResponsesTool):
+     def __init__(self, *, config: LocalShellConfig):
          super().__init__(name="local_shell")
 
      def get_open_ai_tool_definitions(self):
@@ -1149,34 +1255,40 @@ class MCPToolDefinition:
          self.description = description
 
 
- class MCPServer:
-     def __init__(
-         self,
-         *,
-         server_label: str,
-         server_url: str,
-         allowed_tools: Optional[list[str]] = None,
-         headers: Optional[dict] = None,
-         # require approval for all tools
-         require_approval: Optional[Literal["always", "never"]] = None,
-         # list of tools that always require approval
-         always_require_approval: Optional[list[str]] = None,
-         # list of tools that never require approval
-         never_require_approval: Optional[list[str]] = None,
-     ):
-         self.server_label = server_label
-         self.server_url = server_url
-         self.allowed_tools = allowed_tools
-         self.headers = headers
-         self.require_approval = require_approval
-         self.always_require_approval = always_require_approval
-         self.never_require_approval = never_require_approval
+ class MCPServer(BaseModel):
+     server_label: str
+     server_url: Optional[str] = None
+     allowed_tools: Optional[list[str]] = None
+     authorization: Optional[str] = None
+     headers: Optional[dict] = None
+
+     # require approval for all tools
+     require_approval: Optional[Literal["always", "never"]] = None
+     # list of tools that always require approval
+     always_require_approval: Optional[list[str]] = None
+     # list of tools that never require approval
+     never_require_approval: Optional[list[str]] = None
+
+     openai_connector_id: Optional[str] = None
+
+
+ class MCPConfig(ToolkitConfig):
+     name: Literal["mcp"]
+     servers: list[MCPServer]
+
+
+ class MCPToolkitBuilder(ToolkitBuilder):
+     def __init__(self):
+         super().__init__(name="mcp", type=MCPConfig)
+
+     def make(self, *, model: str, config: MCPConfig):
+         return Toolkit(name="mcp", tools=[MCPTool(config=config)])
 
 
  class MCPTool(OpenAIResponsesTool):
-     def __init__(self, *, servers: list[MCPServer]):
+     def __init__(self, *, config: MCPConfig):
          super().__init__(name="mcp")
-         self.servers = servers
+         self.servers = config.servers
 
      def get_open_ai_tool_definitions(self):
          defs = []
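`MCPServer` is now a pydantic `BaseModel` with an optional `server_url` (a server may instead be addressed by `openai_connector_id`), plus new `authorization` and connector fields. A configuration sketch (the label, URL, and tool names are illustrative):

```python
server = MCPServer(
    server_label="docs",
    server_url="https://mcp.example.com/sse",
    allowed_tools=["search"],
    require_approval="never",
)
toolkit = MCPToolkitBuilder().make(
    model="gpt-5",
    config=MCPConfig(name="mcp", servers=[server]),
)
```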
@@ -1184,12 +1296,20 @@ class MCPTool(OpenAIResponsesTool):
              opts = {
                  "type": "mcp",
                  "server_label": server.server_label,
-                 "server_url": server.server_url,
              }
 
+             if server.server_url is not None:
+                 opts["server_url"] = server.server_url
+
+             if server.openai_connector_id is not None:
+                 opts["connector_id"] = server.openai_connector_id
+
              if server.allowed_tools is not None:
                  opts["allowed_tools"] = server.allowed_tools
 
+             if server.authorization is not None:
+                 opts["authorization"] = server.authorization
+
              if server.headers is not None:
                  opts["headers"] = server.headers
 
@@ -1392,19 +1512,19 @@ class MCPTool(OpenAIResponsesTool):
          type: str,
          **extra,
      ):
-         logger.info("approval requested for MCP tool {server_label}.{name}")
+         logger.info(f"approval requested for MCP tool {server_label}.{name}")
          should_approve = await self.on_mcp_approval_request(
              context, arguments=arguments, name=name, server_label=server_label
          )
          if should_approve:
-             logger.info("approval granted for MCP tool {server_label}.{name}")
+             logger.info(f"approval granted for MCP tool {server_label}.{name}")
              return {
                  "type": "mcp_approval_response",
                  "approve": True,
                  "approval_request_id": id,
              }
          else:
-             logger.info("approval denied for MCP tool {server_label}.{name}")
+             logger.info(f"approval denied for MCP tool {server_label}.{name}")
              return {
                  "type": "mcp_approval_response",
                  "approve": False,
@@ -1438,7 +1558,6 @@ class ReasoningTool(OpenAIResponsesTool):
          part: dict,
          sequence_number: int,
          summary_index: int,
-         text: str,
          type: str,
          **extra,
      ):
@@ -1453,7 +1572,6 @@ class ReasoningTool(OpenAIResponsesTool):
          part: dict,
          sequence_number: int,
          summary_index: int,
-         text: str,
          type: str,
          **extra,
      ):
@@ -1467,7 +1585,6 @@ class ReasoningTool(OpenAIResponsesTool):
          output_index: int,
          sequence_number: int,
          summary_index: int,
-         text: str,
          type: str,
          **extra,
      ):
@@ -1481,7 +1598,6 @@ class ReasoningTool(OpenAIResponsesTool):
          output_index: int,
          sequence_number: int,
          summary_index: int,
-         text: str,
          type: str,
          **extra,
      ):
@@ -1491,7 +1607,8 @@ class ReasoningTool(OpenAIResponsesTool):
          self,
          context: ToolContext,
          *,
-         summary: str,
+         summary: list[str],
+         content: Optional[list[str]] = None,
          encrypted_content: str | None,
          status: Literal["in_progress", "completed", "incomplete"],
      ):
@@ -1502,22 +1619,39 @@ class ReasoningTool(OpenAIResponsesTool):
          context: ToolContext,
          *,
          id: str,
-         summary: str,
+         summary: list[dict],
          type: str,
+         content: Optional[list[dict]],
          encrypted_content: str | None,
          status: str,
          **extra,
      ):
          await self.on_reasoning(
-             context, summary=summary, encrypted_content=encrypted_content, status=status
+             context,
+             summary=summary,
+             content=content,
+             encrypted_content=encrypted_content,
+             status=status,
          )
 
 
  # TODO: computer tool call
 
 
- class WebSearchTool(OpenAIResponsesTool):
+ class WebSearchConfig(ToolkitConfig):
+     name: Literal["web_search"]
+
+
+ class WebSearchToolkitBuilder(ToolkitBuilder):
      def __init__(self):
+         super().__init__(name="web_search", type=WebSearchConfig)
+
+     def make(self, *, model: str, config: WebSearchConfig):
+         return Toolkit(name="web_search", tools=[WebSearchTool(config=config)])
+
+
+ class WebSearchTool(OpenAIResponsesTool):
+     def __init__(self, *, config: WebSearchConfig):
          super().__init__(name="web_search")
 
      def get_open_ai_tool_definitions(self) -> list[dict]:
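`on_reasoning` now receives the summary as a list (plus optional `content` parts) instead of a single string. A sketch of a subclass adapting to the new signature (assumes `ReasoningTool`, `ToolContext`, `Optional`, and `Literal` from this module's imports; the print formatting is illustrative):

```python
class LoggingReasoningTool(ReasoningTool):
    async def on_reasoning(
        self,
        context: ToolContext,
        *,
        summary: list[str],
        content: Optional[list[str]] = None,
        encrypted_content: str | None,
        status: Literal["in_progress", "completed", "incomplete"],
    ):
        # Join the list-shaped summary parts that 0.6.0 now passes through.
        print(f"[{status}] " + " ".join(str(part) for part in summary))
```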
@@ -0,0 +1 @@
+ __version__ = "0.6.0"
@@ -1,21 +1,21 @@
  Metadata-Version: 2.4
  Name: meshagent-openai
- Version: 0.5.15
+ Version: 0.6.0
  Summary: OpenAI Building Blocks for Meshagent
  License-Expression: Apache-2.0
  Project-URL: Documentation, https://docs.meshagent.com
  Project-URL: Website, https://www.meshagent.com
  Project-URL: Source, https://www.meshagent.com
- Requires-Python: >=3.12
+ Requires-Python: >=3.13
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: pyjwt~=2.10
  Requires-Dist: pytest~=8.4
  Requires-Dist: pytest-asyncio~=0.26
- Requires-Dist: openai~=1.84
- Requires-Dist: meshagent-api~=0.5.15
- Requires-Dist: meshagent-agents~=0.5.15
- Requires-Dist: meshagent-tools~=0.5.15
+ Requires-Dist: openai~=2.6.0
+ Requires-Dist: meshagent-api~=0.6.0
+ Requires-Dist: meshagent-agents~=0.6.0
+ Requires-Dist: meshagent-tools~=0.6.0
  Dynamic: license-file
 
  # [Meshagent](https://www.meshagent.com)
@@ -0,0 +1,7 @@
+ pyjwt~=2.10
+ pytest~=8.4
+ pytest-asyncio~=0.26
+ openai~=2.6.0
+ meshagent-api~=0.6.0
+ meshagent-agents~=0.6.0
+ meshagent-tools~=0.6.0
@@ -6,17 +6,17 @@ build-backend = "setuptools.build_meta"
  name = "meshagent-openai"
  description = "OpenAI Building Blocks for Meshagent"
  dynamic = ["version", "readme"]
- requires-python = ">=3.12"
+ requires-python = ">=3.13"
  license = "Apache-2.0"
  keywords = []
  dependencies = [
      "pyjwt~=2.10",
      "pytest~=8.4",
      "pytest-asyncio~=0.26",
-     "openai~=1.84",
-     "meshagent-api~=0.5.15",
-     "meshagent-agents~=0.5.15",
-     "meshagent-tools~=0.5.15"
+     "openai~=2.6.0",
+     "meshagent-api~=0.6.0",
+     "meshagent-agents~=0.6.0",
+     "meshagent-tools~=0.6.0"
  ]
 
  [project.urls]
@@ -1 +0,0 @@
- __version__ = "0.5.15"
@@ -1,7 +0,0 @@
- pyjwt~=2.10
- pytest~=8.4
- pytest-asyncio~=0.26
- openai~=1.84
- meshagent-api~=0.5.15
- meshagent-agents~=0.5.15
- meshagent-tools~=0.5.15