meshagent-openai 0.0.11__tar.gz → 0.0.14__tar.gz

This diff shows the changes between publicly released versions of this package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.

This version of meshagent-openai might be problematic.

Files changed (19)
  1. {meshagent_openai-0.0.11 → meshagent_openai-0.0.14}/CHANGELOG.md +3 -0
  2. {meshagent_openai-0.0.11/meshagent_openai.egg-info → meshagent_openai-0.0.14}/PKG-INFO +4 -4
  3. {meshagent_openai-0.0.11 → meshagent_openai-0.0.14}/meshagent/openai/tools/completions_adapter.py +3 -2
  4. {meshagent_openai-0.0.11 → meshagent_openai-0.0.14}/meshagent/openai/tools/responses_adapter.py +9 -7
  5. meshagent_openai-0.0.14/meshagent/openai/version.py +1 -0
  6. {meshagent_openai-0.0.11 → meshagent_openai-0.0.14/meshagent_openai.egg-info}/PKG-INFO +4 -4
  7. meshagent_openai-0.0.14/meshagent_openai.egg-info/requires.txt +7 -0
  8. {meshagent_openai-0.0.11 → meshagent_openai-0.0.14}/pyproject.toml +3 -3
  9. meshagent_openai-0.0.11/meshagent/openai/version.py +0 -1
  10. meshagent_openai-0.0.11/meshagent_openai.egg-info/requires.txt +0 -7
  11. {meshagent_openai-0.0.11 → meshagent_openai-0.0.14}/LICENSE +0 -0
  12. {meshagent_openai-0.0.11 → meshagent_openai-0.0.14}/MANIFEST.in +0 -0
  13. {meshagent_openai-0.0.11 → meshagent_openai-0.0.14}/README.md +0 -0
  14. {meshagent_openai-0.0.11 → meshagent_openai-0.0.14}/meshagent/openai/__init__.py +0 -0
  15. {meshagent_openai-0.0.11 → meshagent_openai-0.0.14}/meshagent/openai/tools/__init__.py +0 -0
  16. {meshagent_openai-0.0.11 → meshagent_openai-0.0.14}/meshagent_openai.egg-info/SOURCES.txt +0 -0
  17. {meshagent_openai-0.0.11 → meshagent_openai-0.0.14}/meshagent_openai.egg-info/dependency_links.txt +0 -0
  18. {meshagent_openai-0.0.11 → meshagent_openai-0.0.14}/meshagent_openai.egg-info/top_level.txt +0 -0
  19. {meshagent_openai-0.0.11 → meshagent_openai-0.0.14}/setup.cfg +0 -0
{meshagent_openai-0.0.11 → meshagent_openai-0.0.14}/CHANGELOG.md
@@ -1,3 +1,6 @@
+ ## [0.0.14]
+ - Stability
+
  ## [0.0.11]
  - Stability
 
{meshagent_openai-0.0.11/meshagent_openai.egg-info → meshagent_openai-0.0.14}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: meshagent-openai
- Version: 0.0.11
+ Version: 0.0.14
  Summary: OpenAI Building Blocks for Meshagent
  License-Expression: Apache-2.0
  Project-URL: Documentation, https://meshagent.com
@@ -13,7 +13,7 @@ Requires-Dist: pyjwt~=2.10.1
  Requires-Dist: pytest~=8.3.5
  Requires-Dist: pytest-asyncio~=0.26.0
  Requires-Dist: openai~=1.70.0
- Requires-Dist: meshagent-api~=0.0.11
- Requires-Dist: meshagent-agents~=0.0.11
- Requires-Dist: meshagent-tools~=0.0.11
+ Requires-Dist: meshagent-api~=0.0.14
+ Requires-Dist: meshagent-agents~=0.0.14
+ Requires-Dist: meshagent-tools~=0.0.14
  Dynamic: license-file
{meshagent_openai-0.0.11 → meshagent_openai-0.0.14}/meshagent/openai/tools/completions_adapter.py
@@ -24,9 +24,8 @@ import logging
  import re
  import asyncio

- logging.basicConfig()
  logger = logging.getLogger("openai_agent")
- logger.setLevel(logging.INFO)
+



@@ -231,6 +230,8 @@ class OpenAICompletionsAdapter(LLMAdapter):
  system_role = "developer"
  elif self._model.startswith("o3"):
  system_role = "developer"
+ elif self._model.startswith("o4"):
+ system_role = "developer"

  context = AgentChatContext(
  system_role=system_role
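Note on the logging change above: the adapter no longer calls logging.basicConfig() or forces the "openai_agent" logger to INFO at import time, so log verbosity is now left to the host application. A minimal sketch of opting back into the previous behavior from application code (assumed usage, not part of this package):

```python
import logging

# Configure logging once at the application entry point instead of relying
# on the library to call basicConfig() when it is imported.
logging.basicConfig(level=logging.WARNING)

# Restore INFO-level output for the adapter's logger if desired.
logging.getLogger("openai_agent").setLevel(logging.INFO)
```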
{meshagent_openai-0.0.11 → meshagent_openai-0.0.14}/meshagent/openai/tools/responses_adapter.py
@@ -3,7 +3,7 @@ from meshagent.agents.agent import Agent, AgentChatContext, AgentCallContext
  from meshagent.api import WebSocketClientProtocol, RoomClient, RoomException
  from meshagent.tools.blob import Blob, BlobStorage
  from meshagent.tools import Toolkit, ToolContext, Tool
- from meshagent.api.messaging import Response, LinkResponse, FileResponse, JsonResponse, TextResponse, EmptyResponse, RawOutputs
+ from meshagent.api.messaging import Response, LinkResponse, FileResponse, JsonResponse, TextResponse, EmptyResponse, RawOutputs, ensure_response
  from meshagent.api.schema_util import prompt_schema
  from meshagent.agents.adapter import ToolResponseAdapter, LLMAdapter
  from uuid import uuid4
@@ -24,9 +24,8 @@ import logging
  import re
  import asyncio

- logging.basicConfig()
  logger = logging.getLogger("openai_agent")
- logger.setLevel(logging.INFO)
+



@@ -129,7 +128,7 @@ class ResponsesToolBundle:
  proxy = self._executors[name]
  result = await proxy.execute(context=context, name=name, arguments=arguments)
  logger.info("success calling %s %s %s", tool_call.id, name, result)
- return result
+ return ensure_response(result)

  except Exception as e:
  logger.error("failed calling %s %s", tool_call.id, name, exc_info=e)
@@ -232,7 +231,7 @@ class OpenAIResponsesToolResponseAdapter(ToolResponseAdapter):

  class OpenAIResponsesAdapter(LLMAdapter[ResponsesToolBundle]):
  def __init__(self,
- model: str = os.getenv("OPENAI_MODEL","gpt-4o"),
+ model: str = os.getenv("OPENAI_MODEL","gpt-4.1"),
  parallel_tool_calls : Optional[bool] = None,
  client: Optional[AsyncOpenAI] = None,
  retries : int = 0,
@@ -250,6 +249,8 @@ class OpenAIResponsesAdapter(LLMAdapter[ResponsesToolBundle]):
  system_role = "developer"
  elif self._model.startswith("o3"):
  system_role = "developer"
+ elif self._model.startswith("o4"):
+ system_role = "developer"
  elif self._model.startswith("computer-use"):
  system_role = "developer"

@@ -277,13 +278,14 @@ class OpenAIResponsesAdapter(LLMAdapter[ResponsesToolBundle]):

  def _get_client(self, *, room: RoomClient) -> AsyncOpenAI:
  if self._client != None:
+
  openai = self._client
  else:
  token : str = room.protocol.token
  url : str = room.room_url

  room_proxy_url = f"{url}/v1"
-
+
  openai=AsyncOpenAI(
  api_key=token,
  base_url=room_proxy_url,
@@ -346,7 +348,7 @@ class OpenAIResponsesAdapter(LLMAdapter[ResponsesToolBundle]):
  "format" : {
  "type" : "json_schema",
  "name" : response_name,
- "schema" : response_schema,
+ "schema" : response_schema,
  "strict" : True,
  }
  }
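The new o4 branches above mirror the existing o1/o3 (and, in the responses adapter, computer-use) handling: reasoning-style model prefixes get a "developer" system role instead of the default. A rough sketch of that selection logic as a standalone helper, assuming the non-matching default is "system" (the default is not shown in this diff):

```python
def system_role_for(model: str) -> str:
    # Reasoning-style models expect instructions under the "developer"
    # role; o4 now joins o1, o3, and the computer-use preview models.
    for prefix in ("o1", "o3", "o4", "computer-use"):
        if model.startswith(prefix):
            return "developer"
    return "system"  # assumed default; not visible in the diff
```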
meshagent_openai-0.0.14/meshagent/openai/version.py
@@ -0,0 +1 @@
+ __version__ = "0.0.14"
{meshagent_openai-0.0.11 → meshagent_openai-0.0.14/meshagent_openai.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: meshagent-openai
- Version: 0.0.11
+ Version: 0.0.14
  Summary: OpenAI Building Blocks for Meshagent
  License-Expression: Apache-2.0
  Project-URL: Documentation, https://meshagent.com
@@ -13,7 +13,7 @@ Requires-Dist: pyjwt~=2.10.1
  Requires-Dist: pytest~=8.3.5
  Requires-Dist: pytest-asyncio~=0.26.0
  Requires-Dist: openai~=1.70.0
- Requires-Dist: meshagent-api~=0.0.11
- Requires-Dist: meshagent-agents~=0.0.11
- Requires-Dist: meshagent-tools~=0.0.11
+ Requires-Dist: meshagent-api~=0.0.14
+ Requires-Dist: meshagent-agents~=0.0.14
+ Requires-Dist: meshagent-tools~=0.0.14
  Dynamic: license-file
meshagent_openai-0.0.14/meshagent_openai.egg-info/requires.txt
@@ -0,0 +1,7 @@
+ pyjwt~=2.10.1
+ pytest~=8.3.5
+ pytest-asyncio~=0.26.0
+ openai~=1.70.0
+ meshagent-api~=0.0.14
+ meshagent-agents~=0.0.14
+ meshagent-tools~=0.0.14
{meshagent_openai-0.0.11 → meshagent_openai-0.0.14}/pyproject.toml
@@ -14,9 +14,9 @@ dependencies = [
  "pytest~=8.3.5",
  "pytest-asyncio~=0.26.0",
  "openai~=1.70.0",
- "meshagent-api~=0.0.11",
- "meshagent-agents~=0.0.11",
- "meshagent-tools~=0.0.11"
+ "meshagent-api~=0.0.14",
+ "meshagent-agents~=0.0.14",
+ "meshagent-tools~=0.0.14"
  ]

  [project.urls]
meshagent_openai-0.0.11/meshagent/openai/version.py
@@ -1 +0,0 @@
- __version__ = "0.0.11"
meshagent_openai-0.0.11/meshagent_openai.egg-info/requires.txt
@@ -1,7 +0,0 @@
- pyjwt~=2.10.1
- pytest~=8.3.5
- pytest-asyncio~=0.26.0
- openai~=1.70.0
- meshagent-api~=0.0.11
- meshagent-agents~=0.0.11
- meshagent-tools~=0.0.11