meshagent-openai 0.5.18__tar.gz → 0.6.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of meshagent-openai might be problematic.
- {meshagent_openai-0.5.18 → meshagent_openai-0.6.1}/CHANGELOG.md +9 -0
- {meshagent_openai-0.5.18/meshagent_openai.egg-info → meshagent_openai-0.6.1}/PKG-INFO +6 -6
- {meshagent_openai-0.5.18 → meshagent_openai-0.6.1}/meshagent/openai/proxy/proxy.py +11 -1
- {meshagent_openai-0.5.18 → meshagent_openai-0.6.1}/meshagent/openai/tools/completions_adapter.py +5 -8
- {meshagent_openai-0.5.18 → meshagent_openai-0.6.1}/meshagent/openai/tools/responses_adapter.py +222 -92
- meshagent_openai-0.6.1/meshagent/openai/version.py +1 -0
- {meshagent_openai-0.5.18 → meshagent_openai-0.6.1/meshagent_openai.egg-info}/PKG-INFO +6 -6
- meshagent_openai-0.6.1/meshagent_openai.egg-info/requires.txt +7 -0
- {meshagent_openai-0.5.18 → meshagent_openai-0.6.1}/pyproject.toml +5 -5
- meshagent_openai-0.5.18/meshagent/openai/version.py +0 -1
- meshagent_openai-0.5.18/meshagent_openai.egg-info/requires.txt +0 -7
- {meshagent_openai-0.5.18 → meshagent_openai-0.6.1}/LICENSE +0 -0
- {meshagent_openai-0.5.18 → meshagent_openai-0.6.1}/MANIFEST.in +0 -0
- {meshagent_openai-0.5.18 → meshagent_openai-0.6.1}/README.md +0 -0
- {meshagent_openai-0.5.18 → meshagent_openai-0.6.1}/meshagent/openai/__init__.py +0 -0
- {meshagent_openai-0.5.18 → meshagent_openai-0.6.1}/meshagent/openai/proxy/__init__.py +0 -0
- {meshagent_openai-0.5.18 → meshagent_openai-0.6.1}/meshagent/openai/tools/__init__.py +0 -0
- {meshagent_openai-0.5.18 → meshagent_openai-0.6.1}/meshagent/openai/tools/schema.py +0 -0
- {meshagent_openai-0.5.18 → meshagent_openai-0.6.1}/meshagent/openai/tools/stt.py +0 -0
- {meshagent_openai-0.5.18 → meshagent_openai-0.6.1}/meshagent/openai/tools/stt_test.py +0 -0
- {meshagent_openai-0.5.18 → meshagent_openai-0.6.1}/meshagent_openai.egg-info/SOURCES.txt +0 -0
- {meshagent_openai-0.5.18 → meshagent_openai-0.6.1}/meshagent_openai.egg-info/dependency_links.txt +0 -0
- {meshagent_openai-0.5.18 → meshagent_openai-0.6.1}/meshagent_openai.egg-info/top_level.txt +0 -0
- {meshagent_openai-0.5.18 → meshagent_openai-0.6.1}/setup.cfg +0 -0
{meshagent_openai-0.5.18/meshagent_openai.egg-info → meshagent_openai-0.6.1}/PKG-INFO
RENAMED
@@ -1,21 +1,21 @@
 Metadata-Version: 2.4
 Name: meshagent-openai
-Version: 0.5.18
+Version: 0.6.1
 Summary: OpenAI Building Blocks for Meshagent
 License-Expression: Apache-2.0
 Project-URL: Documentation, https://docs.meshagent.com
 Project-URL: Website, https://www.meshagent.com
 Project-URL: Source, https://www.meshagent.com
-Requires-Python: >=3.
+Requires-Python: >=3.13
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: pyjwt~=2.10
 Requires-Dist: pytest~=8.4
 Requires-Dist: pytest-asyncio~=0.26
-Requires-Dist: openai~=
-Requires-Dist: meshagent-api~=0.
-Requires-Dist: meshagent-agents~=0.
-Requires-Dist: meshagent-tools~=0.
+Requires-Dist: openai~=2.6.0
+Requires-Dist: meshagent-api~=0.6.1
+Requires-Dist: meshagent-agents~=0.6.1
+Requires-Dist: meshagent-tools~=0.6.1
 Dynamic: license-file
 
 # [Meshagent](https://www.meshagent.com)
{meshagent_openai-0.5.18 → meshagent_openai-0.6.1}/meshagent/openai/proxy/proxy.py
RENAMED
@@ -1,5 +1,8 @@
 from meshagent.api import RoomClient
 from openai import AsyncOpenAI
+import logging
+
+logger = logging.getLogger("openai.client")
 
 
 def get_client(*, room: RoomClient) -> AsyncOpenAI:
@@ -8,7 +11,14 @@ def get_client(*, room: RoomClient) -> AsyncOpenAI:
     # when running inside the room pod, the room.room_url currently points to the external url
     # so we need to use url off the protocol (if available).
    # TODO: room_url should be set properly, but may need a claim in the token to be set during call to say it is local
-    url
+    url = getattr(room.protocol, "url")
+    if url is None:
+        logger.debug(
+            f"protocol does not have url, openai client falling back to room url {room.room_url}"
+        )
+        url = room.room_url
+    else:
+        logger.debug(f"protocol had url, openai client will use {url}")
 
     room_proxy_url = f"{url}/v1"
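With this change the client prefers the live transport URL and only falls back to room.room_url. A minimal usage sketch, assuming the room is already connected and credentialed (the model name is illustrative):

    from meshagent.api import RoomClient
    from meshagent.openai.proxy import get_client

    async def ask(room: RoomClient) -> str:
        # get_client() resolves the proxy base URL ("<url>/v1") from the
        # protocol when available, otherwise from room.room_url
        openai = get_client(room=room)
        response = await openai.responses.create(
            model="gpt-5",  # illustrative; any model the proxy allows
            input="ping",
        )
        return response.output_text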
{meshagent_openai-0.5.18 → meshagent_openai-0.6.1}/meshagent/openai/tools/completions_adapter.py
RENAMED
@@ -1,6 +1,5 @@
 from meshagent.agents.agent import AgentChatContext
-from meshagent.api import RoomClient, RoomException
-from meshagent.tools.blob import Blob, BlobStorage
+from meshagent.api import RoomClient, RoomException, RemoteParticipant
 from meshagent.tools import Toolkit, ToolContext
 from meshagent.api.messaging import (
     Response,
@@ -135,8 +134,7 @@ class CompletionsToolBundle:
 
 # Converts a tool response into a series of messages that can be inserted into the openai context
 class OpenAICompletionsToolResponseAdapter(ToolResponseAdapter):
-    def __init__(self
-        self._blob_storage = blob_storage
+    def __init__(self):
         pass
 
     async def to_plain_text(self, *, room: RoomClient, response: Response) -> str:
@@ -155,10 +153,7 @@ class OpenAICompletionsToolResponseAdapter(ToolResponseAdapter):
             return response.text
 
         elif isinstance(response, FileResponse):
-
-            uri = self._blob_storage.store(blob=blob)
-
-            return f"The results have been written to a blob with the uri {uri} with the mime type {blob.mime_type}."
+            return f"{response.name}"
 
         elif isinstance(response, EmptyResponse):
             return "ok"
@@ -247,11 +242,13 @@ class OpenAICompletionsAdapter(LLMAdapter):
     async def next(
         self,
         *,
+        model: Optional[str] = None,
         context: AgentChatContext,
         room: RoomClient,
         toolkits: Toolkit,
         tool_adapter: Optional[ToolResponseAdapter] = None,
         output_schema: Optional[dict] = None,
+        on_behalf_of: Optional[RemoteParticipant] = None,
     ):
         if tool_adapter is None:
             tool_adapter = OpenAICompletionsToolResponseAdapter()
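A hedged sketch of calling next() with the two keywords this release adds; the adapter construction and surrounding agent wiring are assumptions, not part of the diff:

    from typing import Optional
    from meshagent.api import RoomClient, RemoteParticipant
    from meshagent.agents.agent import AgentChatContext
    from meshagent.tools import Toolkit

    async def take_turn(
        adapter,  # an OpenAICompletionsAdapter; construction details assumed
        room: RoomClient,
        context: AgentChatContext,
        toolkits: Toolkit,
        caller: Optional[RemoteParticipant] = None,
    ):
        # model overrides the adapter default per call; on_behalf_of attributes
        # the request to a specific remote participant (both new in 0.6.1)
        return await adapter.next(
            model="gpt-5",
            context=context,
            room=room,
            toolkits=toolkits,
            on_behalf_of=caller,
        )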
{meshagent_openai-0.5.18 → meshagent_openai-0.6.1}/meshagent/openai/tools/responses_adapter.py
RENAMED
@@ -1,6 +1,5 @@
 from meshagent.agents.agent import AgentChatContext
-from meshagent.api import RoomClient, RoomException
-from meshagent.tools.blob import Blob, BlobStorage
+from meshagent.api import RoomClient, RoomException, RemoteParticipant
 from meshagent.tools import Toolkit, ToolContext, Tool, BaseTool
 from meshagent.api.messaging import (
     Response,
@@ -12,7 +11,12 @@ from meshagent.api.messaging import (
     RawOutputs,
     ensure_response,
 )
-from meshagent.agents.adapter import
+from meshagent.agents.adapter import (
+    ToolResponseAdapter,
+    LLMAdapter,
+    ToolkitBuilder,
+    ToolkitConfig,
+)
 import json
 from typing import List, Literal
 from meshagent.openai.proxy import get_client
@@ -165,8 +169,7 @@ class ResponsesToolBundle:
 
 # Converts a tool response into a series of messages that can be inserted into the openai context
 class OpenAIResponsesToolResponseAdapter(ToolResponseAdapter):
-    def __init__(self
-        self._blob_storage = blob_storage
+    def __init__(self):
        pass
 
     async def to_plain_text(self, *, room: RoomClient, response: Response) -> str:
@@ -185,10 +188,7 @@ class OpenAIResponsesToolResponseAdapter(ToolResponseAdapter):
             return response.text
 
         elif isinstance(response, FileResponse):
-
-            uri = self._blob_storage.store(blob=blob)
-
-            return f"The results have been written to a blob with the uri {uri} with the mime type {blob.mime_type}."
+            return f"{response.name}"
 
         elif isinstance(response, EmptyResponse):
             return "ok"
@@ -246,39 +246,88 @@ class OpenAIResponsesToolResponseAdapter(ToolResponseAdapter):
             )
 
             return response.outputs
+
         else:
             span.set_attribute("kind", "text")
+            if isinstance(response, FileResponse):
+                if response.mime_type.startswith("image/"):
+                    span.set_attribute(
+                        "output", f"image: {response.name}, {response.mime_type}"
+                    )
 
+                    message = {
+                        "output": [
+                            {
+                                "type": "input_image",
+                                "image_url": f"data:{response.mime_type};base64,{base64.b64encode(response.data).decode()}",
+                            }
+                        ],
+                        "call_id": tool_call.call_id,
+                        "type": "function_call_output",
+                    }
+                else:
+                    span.set_attribute(
+                        "output", f"file: {response.name}, {response.mime_type}"
+                    )
 
+                    message = {
+                        "output": [
+                            {
+                                "type": "input_file",
+                                "filename": response.name,
+                                "file_data": f"data:{response.mime_type};base64,{base64.b64encode(response.data).decode()}",
+                            }
+                        ],
+                        "call_id": tool_call.call_id,
+                        "type": "function_call_output",
+                    }
 
+                room.developer.log_nowait(
+                    type="llm.message",
+                    data={
+                        "context": context.id,
+                        "participant_id": room.local_participant.id,
+                        "participant_name": room.local_participant.get_attribute(
+                            "name"
+                        ),
+                        "message": message,
+                    },
+                )
 
+                return [message]
+            else:
+                output = await self.to_plain_text(room=room, response=response)
+                span.set_attribute("output", output)
 
+                message = {
+                    "output": output,
+                    "call_id": tool_call.call_id,
+                    "type": "function_call_output",
+                }
+
+                room.developer.log_nowait(
+                    type="llm.message",
+                    data={
+                        "context": context.id,
+                        "participant_id": room.local_participant.id,
+                        "participant_name": room.local_participant.get_attribute(
+                            "name"
+                        ),
+                        "message": message,
+                    },
+                )
+
+                return [message]
+
+
-class OpenAIResponsesAdapter(LLMAdapter[ResponsesToolBundle]):
+class OpenAIResponsesAdapter(LLMAdapter[ResponseStreamEvent]):
     def __init__(
         self,
-        model: str = os.getenv("OPENAI_MODEL", "gpt-
+        model: str = os.getenv("OPENAI_MODEL", "gpt-5"),
         parallel_tool_calls: Optional[bool] = None,
         client: Optional[AsyncOpenAI] = None,
         response_options: Optional[dict] = None,
+        reasoning_effort: Optional[str] = None,
         provider: str = "openai",
     ):
         self._model = model
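For reference, the function_call_output message the adapter now builds for an image FileResponse has this shape; a sketch with illustrative placeholder values (call id and bytes are not from the diff):

    import base64

    png_bytes = b"\x89PNG..."  # truncated placeholder; the adapter uses response.data
    message = {
        "type": "function_call_output",
        "call_id": "call_123",  # hypothetical call id from the tool call
        "output": [
            {
                "type": "input_image",
                "image_url": f"data:image/png;base64,{base64.b64encode(png_bytes).decode()}",
            }
        ],
    }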
@@ -286,6 +335,10 @@ class OpenAIResponsesAdapter(LLMAdapter[ResponsesToolBundle]):
         self._client = client
         self._response_options = response_options
         self._provider = provider
+        self._reasoning_effort = reasoning_effort
+
+    def default_model(self) -> str:
+        return self._model
 
     def create_chat_context(self):
         system_role = "system"
@@ -316,13 +369,18 @@ class OpenAIResponsesAdapter(LLMAdapter[ResponsesToolBundle]):
     async def next(
         self,
         *,
+        model: Optional[str] = None,
         context: AgentChatContext,
         room: RoomClient,
         toolkits: list[Toolkit],
         tool_adapter: Optional[ToolResponseAdapter] = None,
         output_schema: Optional[dict] = None,
         event_handler: Optional[Callable[[ResponseStreamEvent], None]] = None,
+        on_behalf_of: Optional[RemoteParticipant] = None,
     ):
+        if model is None:
+            model = self.default_model()
+
         with tracer.start_as_current_span("llm.turn") as span:
             span.set_attributes({"chat_context": context.id, "api": "responses"})
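Putting the constructor and next() changes together, a hedged sketch (the import path is assumed from the file location; model names are illustrative):

    from meshagent.openai.tools.responses_adapter import OpenAIResponsesAdapter  # path assumed

    adapter = OpenAIResponsesAdapter(
        model="gpt-5",            # becomes the default returned by default_model()
        reasoning_effort="high",  # forwarded as response_options["reasoning"]
    )
    assert adapter.default_model() == "gpt-5"
    # next(model=...) overrides the default for a single turn, and
    # next(on_behalf_of=participant) adds a Meshagent-On-Behalf-Of header.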
@@ -333,7 +391,7 @@ class OpenAIResponsesAdapter(LLMAdapter[ResponsesToolBundle]):
         while True:
             with tracer.start_as_current_span("llm.turn.iteration") as span:
                 span.set_attributes(
-                    {"model":
+                    {"model": model, "provider": self._provider}
                 )
 
                 openai = (
@@ -359,7 +417,7 @@ class OpenAIResponsesAdapter(LLMAdapter[ResponsesToolBundle]):
 
                 ptc = self._parallel_tool_calls
                 extra = {}
-                if ptc is not None and not
+                if ptc is not None and not model.startswith("o"):
                     extra["parallel_tool_calls"] = ptc
                     span.set_attribute("parallel_tool_calls", ptc)
                 else:
@@ -386,12 +444,30 @@ class OpenAIResponsesAdapter(LLMAdapter[ResponsesToolBundle]):
                 stream = event_handler is not None
 
                 with tracer.start_as_current_span("llm.invoke") as span:
-                    response_options = self._response_options
+                    response_options = copy.deepcopy(self._response_options)
                     if response_options is None:
                         response_options = {}
+
+                    if self._reasoning_effort is not None:
+                        response_options["reasoning"] = {
+                            "effort": self._reasoning_effort,
+                            "summary": "detailed",
+                        }
+
+                    extra_headers = {}
+                    if on_behalf_of is not None:
+                        on_behalf_of_name = on_behalf_of.get_attribute("name")
+                        logger.info(
+                            f"{room.local_participant.get_attribute('name')} making openai request on behalf of {on_behalf_of_name}"
+                        )
+                        extra_headers["Meshagent-On-Behalf-Of"] = (
+                            on_behalf_of_name
+                        )
+
                     response: Response = await openai.responses.create(
+                        extra_headers=extra_headers,
                         stream=stream,
-                        model=
+                        model=model,
                         input=context.messages,
                         tools=open_ai_tools,
                         text=text,
@@ -727,7 +803,6 @@ class OpenAIResponsesAdapter(LLMAdapter[ResponsesToolBundle]):
                         "event": safe_model_dump(event),
                     }
                 )
-
                 event_handler(event)
 
                 if event.type == "response.completed":
@@ -815,32 +890,47 @@ class OpenAIResponsesTool(BaseTool):
         return {}
 
 
+class ImageGenerationConfig(ToolkitConfig):
+    name: Literal["image_generation"]
+    background: Literal["transparent", "opaque", "auto"] = None
+    input_image_mask_url: Optional[str] = None
+    model: Optional[str] = None
+    moderation: Optional[str] = None
+    output_compression: Optional[int] = None
+    output_format: Optional[Literal["png", "webp", "jpeg"]] = None
+    partial_images: Optional[int] = None
+    quality: Optional[Literal["auto", "low", "medium", "high"]] = None
+    size: Optional[Literal["1024x1024", "1024x1536", "1536x1024", "auto"]] = None
+
+
+class ImageGenerationToolkitBuilder(ToolkitBuilder):
+    def __init__(self):
+        super().__init__(name="image_generation", type=ImageGenerationConfig)
+
+    def make(self, *, model: str, config: ImageGenerationConfig):
+        return Toolkit(
+            name="image_generation", tools=[ImageGenerationTool(config=config)]
+        )
+
+
 class ImageGenerationTool(OpenAIResponsesTool):
     def __init__(
         self,
         *,
-
-        input_image_mask_url: Optional[str] = None,
-        model: Optional[str] = None,
-        moderation: Optional[str] = None,
-        output_compression: Optional[int] = None,
-        output_format: Optional[Literal["png", "webp", "jpeg"]] = None,
-        partial_images: Optional[int] = None,
-        quality: Optional[Literal["auto", "low", "medium", "high"]] = None,
-        size: Optional[Literal["1024x1024", "1024x1536", "1536x1024", "auto"]] = None,
+        config: ImageGenerationConfig,
     ):
         super().__init__(name="image_generation")
-        self.background = background
-        self.input_image_mask_url = input_image_mask_url
-        self.model = model
-        self.moderation = moderation
-        self.output_compression = output_compression
-        self.output_format = output_format
-
-        partial_images
-
-        self.quality = quality
-        self.size = size
+        self.background = config.background
+        self.input_image_mask_url = config.input_image_mask_url
+        self.model = config.model
+        self.moderation = config.moderation
+        self.output_compression = config.output_compression
+        self.output_format = config.output_format
+        self.partial_images = (
+            config.partial_images if config.partial_images is not None else 1
+        )
+        self.quality = config.quality
+        self.size = config.size
 
     def get_open_ai_tool_definitions(self):
         opts = {"type": "image_generation"}
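Tool construction is now config-driven. A hedged sketch of building the image generation toolkit under the new scheme; field values are illustrative, and keyword construction of the config is an assumption based on its pydantic-style field declarations:

    config = ImageGenerationConfig(
        name="image_generation",
        output_format="png",
        quality="high",
        size="1024x1024",
    )
    toolkit = ImageGenerationToolkitBuilder().make(model="gpt-5", config=config)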
@@ -986,8 +1076,20 @@ class ImageGenerationTool(OpenAIResponsesTool):
         )
 
 
-class
+class LocalShellConfig(ToolkitConfig):
+    name: Literal["local_shell"]
+
+
+class LocalShellToolkitBuilder(ToolkitBuilder):
     def __init__(self):
+        super().__init__(name="local_shell", type=LocalShellConfig)
+
+    def make(self, *, model: str, config: LocalShellConfig):
+        return Toolkit(name="local_shell", tools=[LocalShellTool(config=config)])
+
+
+class LocalShellTool(OpenAIResponsesTool):
+    def __init__(self, *, config: LocalShellConfig):
         super().__init__(name="local_shell")
 
     def get_open_ai_tool_definitions(self):
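The local_shell toolkit follows the identical pattern; a minimal sketch under the same assumptions:

    toolkit = LocalShellToolkitBuilder().make(
        model="gpt-5", config=LocalShellConfig(name="local_shell")
    )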
@@ -1153,34 +1255,40 @@ class MCPToolDefinition:
         self.description = description
 
 
-class MCPServer:
+class MCPServer(BaseModel):
+    server_label: str
+    server_url: Optional[str] = None
+    allowed_tools: Optional[list[str]] = None
+    authorization: Optional[str] = None
+    headers: Optional[dict] = None
+
+    # require approval for all tools
+    require_approval: Optional[Literal["always", "never"]] = None
+    # list of tools that always require approval
+    always_require_approval: Optional[list[str]] = None
+    # list of tools that never require approval
+    never_require_approval: Optional[list[str]] = None
+
+    openai_connector_id: Optional[str] = None
+
+
+class MCPConfig(ToolkitConfig):
+    name: Literal["mcp"]
+    servers: list[MCPServer]
+
+
+class MCPToolkitBuilder(ToolkitBuilder):
+    def __init__(self):
+        super().__init__(name="mcp", type=MCPConfig)
+
+    def make(self, *, model: str, config: MCPConfig):
+        return Toolkit(name="mcp", tools=[MCPTool(config=config)])
 
 
 class MCPTool(OpenAIResponsesTool):
-    def __init__(self, *,
+    def __init__(self, *, config: MCPConfig):
         super().__init__(name="mcp")
-        self.servers = servers
+        self.servers = config.servers
 
     def get_open_ai_tool_definitions(self):
         defs = []
@@ -1188,12 +1296,20 @@ class MCPTool(OpenAIResponsesTool):
             opts = {
                 "type": "mcp",
                 "server_label": server.server_label,
-                "server_url": server.server_url,
             }
 
+            if server.server_url is not None:
+                opts["server_url"] = server.server_url
+
+            if server.openai_connector_id is not None:
+                opts["connector_id"] = server.openai_connector_id
+
             if server.allowed_tools is not None:
                 opts["allowed_tools"] = server.allowed_tools
 
+            if server.authorization is not None:
+                opts["authorization"] = server.authorization
+
             if server.headers is not None:
                 opts["headers"] = server.headers
@@ -1396,19 +1512,19 @@ class MCPTool(OpenAIResponsesTool):
         type: str,
         **extra,
     ):
-        logger.info("approval requested for MCP tool {server_label}.{name}")
+        logger.info(f"approval requested for MCP tool {server_label}.{name}")
         should_approve = await self.on_mcp_approval_request(
             context, arguments=arguments, name=name, server_label=server_label
         )
         if should_approve:
-            logger.info("approval granted for MCP tool {server_label}.{name}")
+            logger.info(f"approval granted for MCP tool {server_label}.{name}")
             return {
                 "type": "mcp_approval_response",
                 "approve": True,
                 "approval_request_id": id,
             }
         else:
-            logger.info("approval denied for MCP tool {server_label}.{name}")
+            logger.info(f"approval denied for MCP tool {server_label}.{name}")
             return {
                 "type": "mcp_approval_response",
                 "approve": False,
@@ -1442,7 +1558,6 @@ class ReasoningTool(OpenAIResponsesTool):
         part: dict,
         sequence_number: int,
         summary_index: int,
-        text: str,
         type: str,
         **extra,
     ):
@@ -1457,7 +1572,6 @@ class ReasoningTool(OpenAIResponsesTool):
         part: dict,
         sequence_number: int,
         summary_index: int,
-        text: str,
         type: str,
         **extra,
     ):
@@ -1471,7 +1585,6 @@ class ReasoningTool(OpenAIResponsesTool):
         output_index: int,
         sequence_number: int,
         summary_index: int,
-        text: str,
         type: str,
         **extra,
     ):
@@ -1485,7 +1598,6 @@ class ReasoningTool(OpenAIResponsesTool):
         output_index: int,
         sequence_number: int,
         summary_index: int,
-        text: str,
         type: str,
         **extra,
     ):
@@ -1495,7 +1607,8 @@ class ReasoningTool(OpenAIResponsesTool):
         self,
         context: ToolContext,
         *,
-        summary: str,
+        summary: list[str],
+        content: Optional[list[str]] = None,
         encrypted_content: str | None,
         status: Literal["in_progress", "completed", "incomplete"],
     ):
@@ -1506,22 +1619,39 @@ class ReasoningTool(OpenAIResponsesTool):
         context: ToolContext,
         *,
         id: str,
-        summary:
+        summary: list[dict],
         type: str,
+        content: Optional[list[dict]],
         encrypted_content: str | None,
         status: str,
         **extra,
     ):
         await self.on_reasoning(
-            context,
+            context,
+            summary=summary,
+            content=content,
+            encrypted_content=encrypted_content,
+            status=status,
         )
 
 
 # TODO: computer tool call
 
 
-class
+class WebSearchConfig(ToolkitConfig):
+    name: Literal["web_search"]
+
+
+class WebSearchToolkitBuilder(ToolkitBuilder):
     def __init__(self):
+        super().__init__(name="web_search", type=WebSearchConfig)
+
+    def make(self, *, model: str, config: WebSearchConfig):
+        return Toolkit(name="web_search", tools=[WebSearchTool(config=config)])
+
+
+class WebSearchTool(OpenAIResponsesTool):
+    def __init__(self, *, config: WebSearchConfig):
         super().__init__(name="web_search")
 
     def get_open_ai_tool_definitions(self) -> list[dict]:
meshagent_openai-0.6.1/meshagent/openai/version.py
ADDED
@@ -0,0 +1 @@
+__version__ = "0.6.1"
{meshagent_openai-0.5.18 → meshagent_openai-0.6.1/meshagent_openai.egg-info}/PKG-INFO
RENAMED
@@ -1,21 +1,21 @@
 Metadata-Version: 2.4
 Name: meshagent-openai
-Version: 0.5.18
+Version: 0.6.1
 Summary: OpenAI Building Blocks for Meshagent
 License-Expression: Apache-2.0
 Project-URL: Documentation, https://docs.meshagent.com
 Project-URL: Website, https://www.meshagent.com
 Project-URL: Source, https://www.meshagent.com
-Requires-Python: >=3.
+Requires-Python: >=3.13
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: pyjwt~=2.10
 Requires-Dist: pytest~=8.4
 Requires-Dist: pytest-asyncio~=0.26
-Requires-Dist: openai~=
-Requires-Dist: meshagent-api~=0.
-Requires-Dist: meshagent-agents~=0.
-Requires-Dist: meshagent-tools~=0.
+Requires-Dist: openai~=2.6.0
+Requires-Dist: meshagent-api~=0.6.1
+Requires-Dist: meshagent-agents~=0.6.1
+Requires-Dist: meshagent-tools~=0.6.1
 Dynamic: license-file
 
 # [Meshagent](https://www.meshagent.com)
{meshagent_openai-0.5.18 → meshagent_openai-0.6.1}/pyproject.toml
RENAMED
@@ -6,17 +6,17 @@ build-backend = "setuptools.build_meta"
 name = "meshagent-openai"
 description = "OpenAI Building Blocks for Meshagent"
 dynamic = ["version", "readme"]
-requires-python = ">=3.
+requires-python = ">=3.13"
 license = "Apache-2.0"
 keywords = []
 dependencies = [
     "pyjwt~=2.10",
     "pytest~=8.4",
     "pytest-asyncio~=0.26",
-    "openai~=
-    "meshagent-api~=0.
-    "meshagent-agents~=0.
-    "meshagent-tools~=0.
+    "openai~=2.6.0",
+    "meshagent-api~=0.6.1",
+    "meshagent-agents~=0.6.1",
+    "meshagent-tools~=0.6.1"
 ]
 
 [project.urls]
meshagent_openai-0.5.18/meshagent/openai/version.py
REMOVED
@@ -1 +0,0 @@
-__version__ = "0.5.18"
All remaining files (LICENSE, MANIFEST.in, README.md, the __init__.py modules, schema.py, stt.py, stt_test.py, the egg-info metadata files, and setup.cfg) are unchanged between 0.5.18 and 0.6.1.