openai-sdk-helpers 0.0.5__py3-none-any.whl → 0.0.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openai_sdk_helpers/__init__.py +62 -0
- openai_sdk_helpers/agent/__init__.py +31 -0
- openai_sdk_helpers/agent/base.py +330 -0
- openai_sdk_helpers/agent/config.py +66 -0
- openai_sdk_helpers/agent/project_manager.py +511 -0
- openai_sdk_helpers/agent/prompt_utils.py +9 -0
- openai_sdk_helpers/agent/runner.py +215 -0
- openai_sdk_helpers/agent/summarizer.py +85 -0
- openai_sdk_helpers/agent/translator.py +139 -0
- openai_sdk_helpers/agent/utils.py +47 -0
- openai_sdk_helpers/agent/validation.py +97 -0
- openai_sdk_helpers/agent/vector_search.py +462 -0
- openai_sdk_helpers/agent/web_search.py +404 -0
- openai_sdk_helpers/config.py +199 -0
- openai_sdk_helpers/enums/__init__.py +7 -0
- openai_sdk_helpers/enums/base.py +29 -0
- openai_sdk_helpers/environment.py +27 -0
- openai_sdk_helpers/prompt/__init__.py +77 -0
- openai_sdk_helpers/py.typed +0 -0
- openai_sdk_helpers/response/__init__.py +20 -0
- openai_sdk_helpers/response/base.py +505 -0
- openai_sdk_helpers/response/messages.py +211 -0
- openai_sdk_helpers/response/runner.py +104 -0
- openai_sdk_helpers/response/tool_call.py +70 -0
- openai_sdk_helpers/response/vector_store.py +84 -0
- openai_sdk_helpers/structure/__init__.py +43 -0
- openai_sdk_helpers/structure/agent_blueprint.py +224 -0
- openai_sdk_helpers/structure/base.py +713 -0
- openai_sdk_helpers/structure/plan/__init__.py +13 -0
- openai_sdk_helpers/structure/plan/enum.py +64 -0
- openai_sdk_helpers/structure/plan/plan.py +253 -0
- openai_sdk_helpers/structure/plan/task.py +122 -0
- openai_sdk_helpers/structure/prompt.py +24 -0
- openai_sdk_helpers/structure/responses.py +132 -0
- openai_sdk_helpers/structure/summary.py +65 -0
- openai_sdk_helpers/structure/validation.py +47 -0
- openai_sdk_helpers/structure/vector_search.py +86 -0
- openai_sdk_helpers/structure/web_search.py +46 -0
- openai_sdk_helpers/utils/__init__.py +25 -0
- openai_sdk_helpers/utils/core.py +300 -0
- openai_sdk_helpers/vector_storage/__init__.py +15 -0
- openai_sdk_helpers/vector_storage/cleanup.py +91 -0
- openai_sdk_helpers/vector_storage/storage.py +564 -0
- openai_sdk_helpers/vector_storage/types.py +58 -0
- {openai_sdk_helpers-0.0.5.dist-info → openai_sdk_helpers-0.0.7.dist-info}/METADATA +6 -3
- openai_sdk_helpers-0.0.7.dist-info/RECORD +51 -0
- openai_sdk_helpers-0.0.5.dist-info/RECORD +0 -7
- {openai_sdk_helpers-0.0.5.dist-info → openai_sdk_helpers-0.0.7.dist-info}/WHEEL +0 -0
- {openai_sdk_helpers-0.0.5.dist-info → openai_sdk_helpers-0.0.7.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,211 @@
|
|
|
1
|
+
"""Message containers for shared OpenAI responses."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from dataclasses import dataclass, field
|
|
6
|
+
from datetime import datetime, timezone
|
|
7
|
+
from typing import Dict, List, Union, cast
|
|
8
|
+
|
|
9
|
+
from openai.types.responses.response_function_tool_call import ResponseFunctionToolCall
|
|
10
|
+
from openai.types.responses.response_function_tool_call_param import (
|
|
11
|
+
ResponseFunctionToolCallParam,
|
|
12
|
+
)
|
|
13
|
+
from openai.types.responses.response_input_message_content_list_param import (
|
|
14
|
+
ResponseInputMessageContentListParam,
|
|
15
|
+
)
|
|
16
|
+
from openai.types.responses.response_input_param import (
|
|
17
|
+
FunctionCallOutput,
|
|
18
|
+
ResponseInputItemParam,
|
|
19
|
+
)
|
|
20
|
+
from openai.types.responses.response_output_message import ResponseOutputMessage
|
|
21
|
+
|
|
22
|
+
from ..utils import JSONSerializable
|
|
23
|
+
from .tool_call import ResponseToolCall
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
@dataclass
class ResponseMessage(JSONSerializable):
    """One entry in a conversation with the OpenAI client.

    Attributes
    ----------
    role : str
        Originator of the message, e.g. ``"user"``, ``"assistant"`` or
        ``"tool"``.
    content : ResponseInputItemParam | ResponseOutputMessage | ResponseFunctionToolCallParam | FunctionCallOutput | ResponseInputMessageContentListParam
        Message payload, already in the shape the OpenAI client expects.
    timestamp : datetime
        Creation time of the message, timezone-aware UTC.
    metadata : dict[str, str | float | bool]
        Optional extra information attached to the message.

    Methods
    -------
    to_openai_format()
        Return the payload in the format expected by the OpenAI client.
    """

    role: str  # "user", "assistant", "tool", etc.
    content: (
        ResponseInputItemParam
        | ResponseOutputMessage
        | ResponseFunctionToolCallParam
        | FunctionCallOutput
        | ResponseInputMessageContentListParam
    )
    timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
    metadata: Dict[str, Union[str, float, bool]] = field(default_factory=dict)

    def to_openai_format(
        self,
    ) -> (
        ResponseInputItemParam
        | ResponseOutputMessage
        | ResponseFunctionToolCallParam
        | FunctionCallOutput
        | ResponseInputMessageContentListParam
    ):
        """Return the message payload for the OpenAI client.

        Returns
        -------
        ResponseInputItemParam | ResponseOutputMessage | ResponseFunctionToolCallParam | FunctionCallOutput | ResponseInputMessageContentListParam
            The stored ``content``, which is already in OpenAI format.
        """
        # Content is stored pre-formatted, so no conversion is required here.
        return self.content
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
@dataclass
class ResponseMessages(JSONSerializable):
    """Represent a collection of messages in a response.

    This dataclass encapsulates user inputs and assistant outputs during an
    OpenAI API interaction.

    Methods
    -------
    add_system_message(content, **metadata)
        Append a system message to the conversation.
    add_user_message(input_content, **metadata)
        Append a user message to the conversation.
    add_assistant_message(content, metadata=None)
        Append an assistant message to the conversation.
    add_tool_message(content, output, **metadata)
        Record a tool call and its output.
    to_openai_payload()
        Convert stored messages to the OpenAI input payload.
    """

    # Conversation history in insertion order.
    messages: List[ResponseMessage] = field(default_factory=list)

    def add_system_message(
        self, content: ResponseInputMessageContentListParam, **metadata
    ) -> None:
        """Append a system message to the conversation.

        Parameters
        ----------
        content : ResponseInputMessageContentListParam
            System message content in OpenAI format.
        **metadata
            Optional metadata to store with the message.

        Returns
        -------
        None
        """
        # The dict literal matches the OpenAI input-item shape; cast narrows
        # it for the type checker.
        response_input = cast(
            ResponseInputItemParam, {"role": "system", "content": content}
        )
        self.messages.append(
            ResponseMessage(role="system", content=response_input, metadata=metadata)
        )

    def add_user_message(
        self, input_content: ResponseInputItemParam, **metadata
    ) -> None:
        """Append a user message to the conversation.

        Parameters
        ----------
        input_content : ResponseInputItemParam
            Message payload supplied by the user.
        **metadata
            Optional metadata to store with the message.

        Returns
        -------
        None
        """
        self.messages.append(
            ResponseMessage(role="user", content=input_content, metadata=metadata)
        )

    def add_assistant_message(
        self,
        content: ResponseOutputMessage,
        metadata: Dict[str, Union[str, float, bool]] | None = None,
    ) -> None:
        """Append an assistant message to the conversation.

        Parameters
        ----------
        content : ResponseOutputMessage
            Assistant response message.
        metadata : dict[str, Union[str, float, bool]], optional
            Optional metadata to store with the message. Default ``None``,
            which stores an empty mapping. (Previously required; made
            optional for consistency with the other ``add_*`` methods.)

        Returns
        -------
        None
        """
        self.messages.append(
            ResponseMessage(
                role="assistant",
                content=content,
                metadata={} if metadata is None else metadata,
            )
        )

    def add_tool_message(
        self, content: ResponseFunctionToolCall, output: str, **metadata
    ) -> None:
        """Record a tool call and its output in the conversation history.

        Parameters
        ----------
        content : ResponseFunctionToolCall
            Tool call received from OpenAI.
        output : str
            JSON string returned by the executed tool.
        **metadata
            Optional metadata to store with the message.

        Returns
        -------
        None
        """
        tool_call = ResponseToolCall(
            call_id=content.call_id,
            name=content.name,
            arguments=content.arguments,
            output=output,
        )
        function_call, function_call_output = tool_call.to_response_input_item_param()
        # A tool exchange is stored as two messages: the call and its output.
        self.messages.append(
            ResponseMessage(role="tool", content=function_call, metadata=metadata)
        )
        self.messages.append(
            ResponseMessage(
                role="tool", content=function_call_output, metadata=metadata
            )
        )

    def to_openai_payload(
        self,
    ) -> List[
        ResponseInputItemParam
        | ResponseOutputMessage
        | ResponseFunctionToolCallParam
        | FunctionCallOutput
        | ResponseInputMessageContentListParam
    ]:
        """Convert stored messages to the input payload expected by OpenAI.

        Notes
        -----
        Assistant messages are model outputs and are not included in the
        next request's input payload.

        Returns
        -------
        list
            List of message payloads excluding assistant outputs.
        """
        return [
            msg.to_openai_format() for msg in self.messages if msg.role != "assistant"
        ]
|
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
"""Convenience runners for response workflows."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
|
|
7
|
+
from typing import Any, Optional, Type, TypeVar
|
|
8
|
+
|
|
9
|
+
from .base import ResponseBase
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
R = TypeVar("R", bound=ResponseBase[Any])
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def run_sync(
    response_cls: Type[R],
    *,
    content: str,
    response_kwargs: Optional[dict[str, Any]] = None,
) -> Any:
    """Run a response workflow synchronously and close resources.

    Parameters
    ----------
    response_cls
        Response class to instantiate.
    content
        Prompt text to send to the OpenAI API.
    response_kwargs
        Keyword arguments forwarded to ``response_cls``. Default ``None``.

    Returns
    -------
    Any
        Parsed result produced by the instance's synchronous run method.
    """
    kwargs = response_kwargs if response_kwargs is not None else {}
    instance = response_cls(**kwargs)
    try:
        result = instance.run_sync(content=content)
    finally:
        # Always release the instance's resources, even on failure.
        instance.close()
    return result
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
async def run_async(
    response_cls: Type[R],
    *,
    content: str,
    response_kwargs: Optional[dict[str, Any]] = None,
) -> Any:
    """Run a response workflow asynchronously and close resources.

    Parameters
    ----------
    response_cls
        Response class to instantiate.
    content
        Prompt text to send to the OpenAI API.
    response_kwargs
        Keyword arguments forwarded to ``response_cls``. Default ``None``.

    Returns
    -------
    Any
        Parsed result produced by the instance's asynchronous run method.
    """
    kwargs = response_kwargs if response_kwargs is not None else {}
    instance = response_cls(**kwargs)
    try:
        result = await instance.run_async(content=content)
    finally:
        # Always release the instance's resources, even on failure.
        instance.close()
    return result
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
def run_streamed(
    response_cls: Type[R],
    *,
    content: str,
    response_kwargs: Optional[dict[str, Any]] = None,
) -> Any:
    """Run a response workflow to completion on a fresh event loop.

    This mirrors the agent API for discoverability. Streaming responses are
    not currently supported by :class:`ResponseBase`, so the result is the
    same value :func:`run_async` would produce.

    Parameters
    ----------
    response_cls
        Response class to instantiate.
    content
        Prompt text to send to the OpenAI API.
    response_kwargs
        Keyword arguments forwarded to ``response_cls``. Default ``None``.

    Returns
    -------
    Any
        Parsed response returned from :func:`run_async`.
    """
    coroutine = run_async(
        response_cls, content=content, response_kwargs=response_kwargs
    )
    # asyncio.run drives the coroutine to completion and tears the loop down.
    return asyncio.run(coroutine)
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
__all__ = ["run_sync", "run_async", "run_streamed"]
|
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
"""Tool call representation for shared responses."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from dataclasses import dataclass
|
|
6
|
+
from typing import Tuple
|
|
7
|
+
|
|
8
|
+
from openai.types.responses.response_function_tool_call_param import (
|
|
9
|
+
ResponseFunctionToolCallParam,
|
|
10
|
+
)
|
|
11
|
+
from openai.types.responses.response_input_param import FunctionCallOutput
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
@dataclass
class ResponseToolCall:
    """Record of a single tool invocation and its result.

    Attributes
    ----------
    call_id : str
        Identifier of the tool call.
    name : str
        Name of the tool invoked.
    arguments : str
        JSON string with the arguments passed to the tool.
    output : str
        JSON string representing the result produced by the tool.

    Methods
    -------
    to_response_input_item_param()
        Convert stored data into OpenAI tool call objects.
    """

    call_id: str
    name: str
    arguments: str
    output: str

    def to_response_input_item_param(
        self,
    ) -> Tuple[ResponseFunctionToolCallParam, FunctionCallOutput]:
        """Build the OpenAI request objects for this tool call.

        Returns
        -------
        tuple[ResponseFunctionToolCallParam, FunctionCallOutput]
            The function call object and the corresponding output object
            suitable for inclusion in an OpenAI request.
        """
        from typing import cast

        call_payload = {
            "arguments": self.arguments,
            "call_id": self.call_id,
            "name": self.name,
            "type": "function_call",
        }
        output_payload = {
            "call_id": self.call_id,
            "output": self.output,
            "type": "function_call_output",
        }
        # cast only narrows the static type; the dicts are returned as-is.
        return (
            cast(ResponseFunctionToolCallParam, call_payload),
            cast(FunctionCallOutput, output_payload),
        )
|
|
@@ -0,0 +1,84 @@
|
|
|
1
|
+
"""Helpers for attaching vector stores to responses."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import Any, Optional, Sequence
|
|
6
|
+
|
|
7
|
+
from openai import OpenAI
|
|
8
|
+
|
|
9
|
+
from ..utils import ensure_list
|
|
10
|
+
from .base import ResponseBase
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def attach_vector_store(
    response: ResponseBase[Any],
    vector_stores: str | Sequence[str],
    api_key: Optional[str] = None,
) -> list[str]:
    """Attach vector stores to a response ``file_search`` tool.

    Parameters
    ----------
    response
        Response instance whose tool configuration is updated.
    vector_stores
        Single vector store name or a sequence of names to attach.
    api_key : str, optional
        API key used when the response does not already have a client.
        Default ``None``.

    Returns
    -------
    list[str]
        Ordered list of vector store IDs applied to the ``file_search`` tool.

    Raises
    ------
    ValueError
        If a vector store cannot be resolved or no API key is available when
        required.
    """
    names = ensure_list(vector_stores)

    # Reuse the response's client when present; otherwise build one from the
    # supplied API key.
    openai_client = getattr(response, "_client", None)
    if openai_client is None:
        if api_key is None:
            raise ValueError(
                "OpenAI API key is required to resolve vector store names."
            )
        openai_client = OpenAI(api_key=api_key)

    # Map each store name to the ID of its FIRST occurrence in the listing
    # (setdefault keeps the first match, mirroring a first-match scan).
    first_id_by_name: dict[str, str] = {}
    for store in openai_client.vector_stores.list().data:
        first_id_by_name.setdefault(store.name, store.id)

    store_ids: list[str] = []
    for name in names:
        store_id = first_id_by_name.get(name)
        if store_id is None:
            raise ValueError(f"Vector store '{name}' not found.")
        if store_id not in store_ids:
            store_ids.append(store_id)

    # Merge into the first existing file_search tool, if any.
    for tool in response._tools:
        if tool.get("type") == "file_search":
            merged = ensure_list(tool.get("vector_store_ids", [])).copy()
            for store_id in store_ids:
                if store_id not in merged:
                    merged.append(store_id)
            tool["vector_store_ids"] = merged
            return merged

    # No file_search tool yet: create one carrying the resolved IDs.
    response._tools.append({"type": "file_search", "vector_store_ids": store_ids})
    return store_ids
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
__all__ = ["attach_vector_store"]
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
"""Shared structured output models and base helpers."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from .agent_blueprint import AgentBlueprint
|
|
6
|
+
from .plan import *
|
|
7
|
+
from .base import *
|
|
8
|
+
from .prompt import PromptStructure
|
|
9
|
+
from .responses import *
|
|
10
|
+
from .summary import *
|
|
11
|
+
from .vector_search import *
|
|
12
|
+
from .validation import ValidationResultStructure
|
|
13
|
+
from .web_search import *
|
|
14
|
+
|
|
15
|
+
__all__ = [
|
|
16
|
+
"BaseStructure",
|
|
17
|
+
"SchemaOptions",
|
|
18
|
+
"spec_field",
|
|
19
|
+
"AgentBlueprint",
|
|
20
|
+
"AgentEnum",
|
|
21
|
+
"TaskStructure",
|
|
22
|
+
"PlanStructure",
|
|
23
|
+
"PromptStructure",
|
|
24
|
+
"SummaryTopic",
|
|
25
|
+
"SummaryStructure",
|
|
26
|
+
"ExtendedSummaryStructure",
|
|
27
|
+
"WebSearchStructure",
|
|
28
|
+
"WebSearchPlanStructure",
|
|
29
|
+
"WebSearchItemStructure",
|
|
30
|
+
"WebSearchItemResultStructure",
|
|
31
|
+
"WebSearchReportStructure",
|
|
32
|
+
"VectorSearchReportStructure",
|
|
33
|
+
"VectorSearchItemStructure",
|
|
34
|
+
"VectorSearchItemResultStructure",
|
|
35
|
+
"VectorSearchItemResultsStructure",
|
|
36
|
+
"VectorSearchPlanStructure",
|
|
37
|
+
"VectorSearchStructure",
|
|
38
|
+
"ValidationResultStructure",
|
|
39
|
+
"assistant_tool_definition",
|
|
40
|
+
"assistant_format",
|
|
41
|
+
"response_tool_definition",
|
|
42
|
+
"response_format",
|
|
43
|
+
]
|