openai-sdk-helpers 0.0.7__py3-none-any.whl → 0.0.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openai_sdk_helpers/__init__.py +85 -10
- openai_sdk_helpers/agent/__init__.py +8 -4
- openai_sdk_helpers/agent/base.py +81 -46
- openai_sdk_helpers/agent/config.py +6 -4
- openai_sdk_helpers/agent/{project_manager.py → coordination.py} +29 -45
- openai_sdk_helpers/agent/prompt_utils.py +7 -1
- openai_sdk_helpers/agent/runner.py +67 -141
- openai_sdk_helpers/agent/search/__init__.py +33 -0
- openai_sdk_helpers/agent/search/base.py +297 -0
- openai_sdk_helpers/agent/{vector_search.py → search/vector.py} +89 -157
- openai_sdk_helpers/agent/{web_search.py → search/web.py} +82 -162
- openai_sdk_helpers/agent/summarizer.py +29 -8
- openai_sdk_helpers/agent/translator.py +40 -13
- openai_sdk_helpers/agent/validation.py +32 -8
- openai_sdk_helpers/async_utils.py +132 -0
- openai_sdk_helpers/config.py +74 -36
- openai_sdk_helpers/context_manager.py +241 -0
- openai_sdk_helpers/enums/__init__.py +9 -1
- openai_sdk_helpers/enums/base.py +67 -8
- openai_sdk_helpers/environment.py +33 -6
- openai_sdk_helpers/errors.py +133 -0
- openai_sdk_helpers/logging_config.py +105 -0
- openai_sdk_helpers/prompt/__init__.py +10 -71
- openai_sdk_helpers/prompt/base.py +172 -0
- openai_sdk_helpers/response/__init__.py +37 -5
- openai_sdk_helpers/response/base.py +427 -189
- openai_sdk_helpers/response/config.py +176 -0
- openai_sdk_helpers/response/messages.py +104 -40
- openai_sdk_helpers/response/runner.py +79 -35
- openai_sdk_helpers/response/tool_call.py +75 -12
- openai_sdk_helpers/response/vector_store.py +29 -16
- openai_sdk_helpers/retry.py +175 -0
- openai_sdk_helpers/streamlit_app/__init__.py +30 -0
- openai_sdk_helpers/streamlit_app/app.py +345 -0
- openai_sdk_helpers/streamlit_app/config.py +502 -0
- openai_sdk_helpers/streamlit_app/streamlit_web_search.py +68 -0
- openai_sdk_helpers/structure/__init__.py +69 -3
- openai_sdk_helpers/structure/agent_blueprint.py +82 -19
- openai_sdk_helpers/structure/base.py +245 -91
- openai_sdk_helpers/structure/plan/__init__.py +15 -1
- openai_sdk_helpers/structure/plan/enum.py +41 -5
- openai_sdk_helpers/structure/plan/plan.py +101 -45
- openai_sdk_helpers/structure/plan/task.py +38 -6
- openai_sdk_helpers/structure/prompt.py +21 -2
- openai_sdk_helpers/structure/responses.py +52 -11
- openai_sdk_helpers/structure/summary.py +55 -7
- openai_sdk_helpers/structure/validation.py +34 -6
- openai_sdk_helpers/structure/vector_search.py +132 -18
- openai_sdk_helpers/structure/web_search.py +128 -12
- openai_sdk_helpers/types.py +57 -0
- openai_sdk_helpers/utils/__init__.py +32 -1
- openai_sdk_helpers/utils/core.py +200 -32
- openai_sdk_helpers/validation.py +302 -0
- openai_sdk_helpers/vector_storage/__init__.py +21 -1
- openai_sdk_helpers/vector_storage/cleanup.py +25 -13
- openai_sdk_helpers/vector_storage/storage.py +124 -66
- openai_sdk_helpers/vector_storage/types.py +20 -19
- openai_sdk_helpers-0.0.9.dist-info/METADATA +550 -0
- openai_sdk_helpers-0.0.9.dist-info/RECORD +66 -0
- openai_sdk_helpers-0.0.7.dist-info/METADATA +0 -193
- openai_sdk_helpers-0.0.7.dist-info/RECORD +0 -51
- {openai_sdk_helpers-0.0.7.dist-info → openai_sdk_helpers-0.0.9.dist-info}/WHEEL +0 -0
- {openai_sdk_helpers-0.0.7.dist-info → openai_sdk_helpers-0.0.9.dist-info}/licenses/LICENSE +0 -0
|
@@ -1,4 +1,10 @@
|
|
|
1
|
-
"""
|
|
1
|
+
"""Core response management for OpenAI API interactions.
|
|
2
|
+
|
|
3
|
+
This module implements the BaseResponse class, which manages the complete
|
|
4
|
+
lifecycle of OpenAI API interactions including input construction, tool
|
|
5
|
+
execution, message history, vector store attachments, and structured output
|
|
6
|
+
parsing.
|
|
7
|
+
"""
|
|
2
8
|
|
|
3
9
|
from __future__ import annotations
|
|
4
10
|
|
|
@@ -10,19 +16,15 @@ import threading
|
|
|
10
16
|
import uuid
|
|
11
17
|
from pathlib import Path
|
|
12
18
|
from typing import (
|
|
19
|
+
TYPE_CHECKING,
|
|
13
20
|
Any,
|
|
14
21
|
Callable,
|
|
15
22
|
Generic,
|
|
16
|
-
|
|
17
|
-
Optional,
|
|
18
|
-
Tuple,
|
|
19
|
-
Type,
|
|
23
|
+
Sequence,
|
|
20
24
|
TypeVar,
|
|
21
|
-
Union,
|
|
22
25
|
cast,
|
|
23
26
|
)
|
|
24
27
|
|
|
25
|
-
from openai import OpenAI
|
|
26
28
|
from openai.types.responses.response_function_tool_call import ResponseFunctionToolCall
|
|
27
29
|
from openai.types.responses.response_input_file_param import ResponseInputFileParam
|
|
28
30
|
from openai.types.responses.response_input_message_content_list_param import (
|
|
@@ -32,119 +34,173 @@ from openai.types.responses.response_input_param import ResponseInputItemParam
|
|
|
32
34
|
from openai.types.responses.response_input_text_param import ResponseInputTextParam
|
|
33
35
|
from openai.types.responses.response_output_message import ResponseOutputMessage
|
|
34
36
|
|
|
35
|
-
from .messages import ResponseMessages
|
|
37
|
+
from .messages import ResponseMessage, ResponseMessages
|
|
38
|
+
from ..config import OpenAISettings
|
|
36
39
|
from ..structure import BaseStructure
|
|
40
|
+
from ..types import OpenAIClient
|
|
37
41
|
from ..utils import ensure_list, log
|
|
38
42
|
|
|
43
|
+
if TYPE_CHECKING: # pragma: no cover - only for typing hints
|
|
44
|
+
from openai_sdk_helpers.streamlit_app.config import StreamlitAppConfig
|
|
45
|
+
|
|
39
46
|
T = TypeVar("T", bound=BaseStructure)
|
|
40
|
-
ToolHandler = Callable[[ResponseFunctionToolCall],
|
|
41
|
-
ProcessContent = Callable[[str],
|
|
47
|
+
ToolHandler = Callable[[ResponseFunctionToolCall], str | Any]
|
|
48
|
+
ProcessContent = Callable[[str], tuple[str, list[str]]]
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
RB = TypeVar("RB", bound="BaseResponse[BaseStructure]")
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
class BaseResponse(Generic[T]):
|
|
55
|
+
"""Manage OpenAI API interactions for structured responses.
|
|
42
56
|
|
|
57
|
+
Orchestrates the complete lifecycle of OpenAI API requests including
|
|
58
|
+
input construction, tool execution, message history management, vector
|
|
59
|
+
store attachments, and structured output parsing. Supports both
|
|
60
|
+
synchronous and asynchronous execution with automatic resource cleanup.
|
|
43
61
|
|
|
44
|
-
class
|
|
45
|
-
|
|
62
|
+
The class handles conversation state, tool calls with custom handlers,
|
|
63
|
+
file attachments via vector stores, and optional parsing into typed
|
|
64
|
+
structured output models. Sessions can be persisted to disk and restored.
|
|
46
65
|
|
|
47
|
-
|
|
48
|
-
|
|
66
|
+
Attributes
|
|
67
|
+
----------
|
|
68
|
+
uuid : UUID
|
|
69
|
+
Unique identifier for this response session.
|
|
70
|
+
name : str
|
|
71
|
+
Lowercase class name used for path construction.
|
|
72
|
+
messages : ResponseMessages
|
|
73
|
+
Complete message history for this session.
|
|
49
74
|
|
|
50
75
|
Methods
|
|
51
76
|
-------
|
|
52
|
-
run_async(content, attachments)
|
|
77
|
+
run_async(content, attachments=None)
|
|
53
78
|
Generate a response asynchronously and return parsed output.
|
|
54
|
-
run_sync(content, attachments)
|
|
55
|
-
|
|
56
|
-
run_streamed(content, attachments)
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
79
|
+
run_sync(content, attachments=None)
|
|
80
|
+
Execute run_async synchronously with thread management.
|
|
81
|
+
run_streamed(content, attachments=None)
|
|
82
|
+
Execute run_async and await the result (streaming not yet supported).
|
|
83
|
+
get_last_tool_message()
|
|
84
|
+
Return the most recent tool message or None.
|
|
85
|
+
get_last_user_message()
|
|
86
|
+
Return the most recent user message or None.
|
|
87
|
+
get_last_assistant_message()
|
|
88
|
+
Return the most recent assistant message or None.
|
|
89
|
+
build_streamlit_config(**kwargs)
|
|
90
|
+
Construct a StreamlitAppConfig using this class as the builder.
|
|
91
|
+
save(filepath=None)
|
|
92
|
+
Serialize the message history to a JSON file.
|
|
60
93
|
close()
|
|
61
|
-
Clean up remote resources
|
|
94
|
+
Clean up remote resources including vector stores.
|
|
95
|
+
|
|
96
|
+
Examples
|
|
97
|
+
--------
|
|
98
|
+
>>> from openai_sdk_helpers import BaseResponse, OpenAISettings
|
|
99
|
+
>>> settings = OpenAISettings(api_key="...", default_model="gpt-4")
|
|
100
|
+
>>> response = BaseResponse(
|
|
101
|
+
... instructions="You are a helpful assistant",
|
|
102
|
+
... tools=None,
|
|
103
|
+
... output_structure=None,
|
|
104
|
+
... tool_handlers={},
|
|
105
|
+
... openai_settings=settings
|
|
106
|
+
... )
|
|
107
|
+
>>> result = response.run_sync("Hello, world!")
|
|
108
|
+
>>> response.close()
|
|
62
109
|
"""
|
|
63
110
|
|
|
64
111
|
def __init__(
|
|
65
112
|
self,
|
|
66
113
|
*,
|
|
67
114
|
instructions: str,
|
|
68
|
-
tools:
|
|
69
|
-
|
|
70
|
-
output_structure: Optional[Type[T]],
|
|
115
|
+
tools: list | None,
|
|
116
|
+
output_structure: type[T] | None,
|
|
71
117
|
tool_handlers: dict[str, ToolHandler],
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
attachments: Optional[Union[Tuple[str, str], list[Tuple[str, str]]]] = None,
|
|
79
|
-
data_path_fn: Optional[Callable[[str], Path]] = None,
|
|
80
|
-
save_path: Optional[Path | str] = None,
|
|
118
|
+
openai_settings: OpenAISettings,
|
|
119
|
+
process_content: ProcessContent | None = None,
|
|
120
|
+
name: str | None = None,
|
|
121
|
+
system_vector_store: list[str] | None = None,
|
|
122
|
+
data_path_fn: Callable[[str], Path] | None = None,
|
|
123
|
+
save_path: Path | str | None = None,
|
|
81
124
|
) -> None:
|
|
82
|
-
"""Initialize a response session.
|
|
125
|
+
"""Initialize a response session with OpenAI configuration.
|
|
126
|
+
|
|
127
|
+
Sets up the OpenAI client, message history, vector stores, and tool
|
|
128
|
+
handlers for a complete response workflow. The session can optionally
|
|
129
|
+
be persisted to disk for later restoration.
|
|
83
130
|
|
|
84
131
|
Parameters
|
|
85
132
|
----------
|
|
86
133
|
instructions : str
|
|
87
|
-
System instructions
|
|
134
|
+
System instructions provided to the OpenAI API for context.
|
|
88
135
|
tools : list or None
|
|
89
|
-
Tool definitions for the OpenAI request.
|
|
90
|
-
schema : object or None
|
|
91
|
-
Optional response schema configuration.
|
|
136
|
+
Tool definitions for the OpenAI API request. Pass None for no tools.
|
|
92
137
|
output_structure : type[BaseStructure] or None
|
|
93
|
-
Structure
|
|
138
|
+
Structure class used to parse tool call outputs. When provided,
|
|
139
|
+
the schema is automatically generated using the structure's
|
|
140
|
+
response_format() method. Pass None for unstructured responses.
|
|
94
141
|
tool_handlers : dict[str, ToolHandler]
|
|
95
|
-
Mapping
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
save_path : Path | str or None, default=None
|
|
113
|
-
Optional path to a directory or file for persisted messages.
|
|
142
|
+
Mapping from tool names to callable handlers. Each handler receives
|
|
143
|
+
a ResponseFunctionToolCall and returns a string or any serializable
|
|
144
|
+
result.
|
|
145
|
+
openai_settings : OpenAISettings
|
|
146
|
+
Fully configured OpenAI settings with API key and default model.
|
|
147
|
+
process_content : callable or None, default None
|
|
148
|
+
Optional callback that processes input text and extracts file
|
|
149
|
+
attachments. Must return a tuple of (processed_text, attachment_list).
|
|
150
|
+
name : str or None, default None
|
|
151
|
+
Module name used for data path construction when data_path_fn is set.
|
|
152
|
+
system_vector_store : list[str] or None, default None
|
|
153
|
+
Optional list of vector store names to attach as system context.
|
|
154
|
+
data_path_fn : callable or None, default None
|
|
155
|
+
Function mapping name to a base directory path for artifact storage.
|
|
156
|
+
save_path : Path, str, or None, default None
|
|
157
|
+
Optional path to a directory or file where message history is saved.
|
|
158
|
+
If a directory, files are named using the session UUID.
|
|
114
159
|
|
|
115
160
|
Raises
|
|
116
161
|
------
|
|
117
162
|
ValueError
|
|
118
|
-
If
|
|
163
|
+
If api_key is missing from openai_settings.
|
|
164
|
+
If default_model is missing from openai_settings.
|
|
119
165
|
RuntimeError
|
|
120
166
|
If the OpenAI client fails to initialize.
|
|
167
|
+
|
|
168
|
+
Examples
|
|
169
|
+
--------
|
|
170
|
+
>>> from openai_sdk_helpers import BaseResponse, OpenAISettings
|
|
171
|
+
>>> settings = OpenAISettings(api_key="sk-...", default_model="gpt-4")
|
|
172
|
+
>>> response = BaseResponse(
|
|
173
|
+
... instructions="You are helpful",
|
|
174
|
+
... tools=None,
|
|
175
|
+
... output_structure=None,
|
|
176
|
+
... tool_handlers={},
|
|
177
|
+
... openai_settings=settings
|
|
178
|
+
... )
|
|
121
179
|
"""
|
|
122
180
|
self._tool_handlers = tool_handlers
|
|
123
181
|
self._process_content = process_content
|
|
124
|
-
self.
|
|
125
|
-
self._vector_storage_cls = vector_storage_cls
|
|
182
|
+
self._name = name
|
|
126
183
|
self._data_path_fn = data_path_fn
|
|
127
184
|
self._save_path = Path(save_path) if save_path is not None else None
|
|
128
185
|
self._instructions = instructions
|
|
129
186
|
self._tools = tools if tools is not None else []
|
|
130
|
-
self._schema = schema
|
|
131
187
|
self._output_structure = output_structure
|
|
132
|
-
self.
|
|
133
|
-
self._cleanup_system_vector_storage = False
|
|
134
|
-
|
|
135
|
-
if client is None:
|
|
136
|
-
if api_key is None:
|
|
137
|
-
raise ValueError("OpenAI API key is required")
|
|
138
|
-
try:
|
|
139
|
-
self._client = OpenAI(api_key=api_key)
|
|
140
|
-
except Exception as exc:
|
|
141
|
-
raise RuntimeError("Failed to initialize OpenAI client") from exc
|
|
142
|
-
else:
|
|
143
|
-
self._client = client
|
|
188
|
+
self._openai_settings = openai_settings
|
|
144
189
|
|
|
145
|
-
self.
|
|
190
|
+
if not self._openai_settings.api_key:
|
|
191
|
+
raise ValueError("OpenAI API key is required")
|
|
192
|
+
|
|
193
|
+
self._client: OpenAIClient
|
|
194
|
+
try:
|
|
195
|
+
self._client = self._openai_settings.create_client()
|
|
196
|
+
except Exception as exc: # pragma: no cover - defensive guard
|
|
197
|
+
raise RuntimeError("Failed to initialize OpenAI client") from exc
|
|
198
|
+
|
|
199
|
+
self._model = self._openai_settings.default_model
|
|
146
200
|
if not self._model:
|
|
147
|
-
raise ValueError(
|
|
201
|
+
raise ValueError(
|
|
202
|
+
"OpenAI model is required. Set 'default_model' on OpenAISettings."
|
|
203
|
+
)
|
|
148
204
|
|
|
149
205
|
self.uuid = uuid.uuid4()
|
|
150
206
|
self.name = self.__class__.__name__.lower()
|
|
@@ -153,78 +209,82 @@ class ResponseBase(Generic[T]):
|
|
|
153
209
|
ResponseInputTextParam(type="input_text", text=instructions)
|
|
154
210
|
]
|
|
155
211
|
|
|
156
|
-
self.
|
|
157
|
-
|
|
212
|
+
self._user_vector_storage: Any | None = None
|
|
213
|
+
|
|
214
|
+
# New logic: system_vector_store is a list of vector store names to attach
|
|
215
|
+
if system_vector_store:
|
|
216
|
+
from .vector_store import attach_vector_store
|
|
158
217
|
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
218
|
+
attach_vector_store(
|
|
219
|
+
self,
|
|
220
|
+
system_vector_store,
|
|
221
|
+
api_key=(
|
|
222
|
+
self._client.api_key
|
|
223
|
+
if hasattr(self._client, "api_key")
|
|
224
|
+
else self._openai_settings.api_key
|
|
225
|
+
),
|
|
166
226
|
)
|
|
167
|
-
self._cleanup_system_vector_storage = True
|
|
168
|
-
system_vector_storage = cast(Any, self._system_vector_storage)
|
|
169
|
-
for file_path, tool_type in attachments:
|
|
170
|
-
uploaded_file = system_vector_storage.upload_file(file_path=file_path)
|
|
171
|
-
self.file_objects.setdefault(tool_type, []).append(uploaded_file.id)
|
|
172
|
-
|
|
173
|
-
self.tool_resources = {}
|
|
174
|
-
required_tools = []
|
|
175
|
-
|
|
176
|
-
for tool_type, file_ids in self.file_objects.items():
|
|
177
|
-
required_tools.append({"type": tool_type})
|
|
178
|
-
self.tool_resources[tool_type] = {"file_ids": file_ids}
|
|
179
|
-
if tool_type == "file_search":
|
|
180
|
-
self.tool_resources[tool_type]["vector_store_ids"] = [
|
|
181
|
-
system_vector_storage.id
|
|
182
|
-
]
|
|
183
|
-
|
|
184
|
-
existing_tool_types = {tool["type"] for tool in self._tools}
|
|
185
|
-
for tool in required_tools:
|
|
186
|
-
tool_type = tool["type"]
|
|
187
|
-
if tool_type == "file_search":
|
|
188
|
-
tool["vector_store_ids"] = [system_vector_storage.id]
|
|
189
|
-
if tool_type not in existing_tool_types:
|
|
190
|
-
self._tools.append(tool)
|
|
191
227
|
|
|
192
228
|
self.messages = ResponseMessages()
|
|
193
229
|
self.messages.add_system_message(content=system_content)
|
|
194
230
|
if self._save_path is not None or (
|
|
195
|
-
self._data_path_fn is not None and self.
|
|
231
|
+
self._data_path_fn is not None and self._name is not None
|
|
196
232
|
):
|
|
197
233
|
self.save()
|
|
198
234
|
|
|
199
235
|
@property
|
|
200
236
|
def data_path(self) -> Path:
|
|
201
|
-
"""Return the directory
|
|
237
|
+
"""Return the directory for persisting session artifacts.
|
|
238
|
+
|
|
239
|
+
Constructs a path using data_path_fn, name, class name, and the
|
|
240
|
+
session name. Both data_path_fn and name must be set during
|
|
241
|
+
initialization for this property to work.
|
|
202
242
|
|
|
203
243
|
Returns
|
|
204
244
|
-------
|
|
205
245
|
Path
|
|
206
|
-
Absolute path for persisting response artifacts.
|
|
246
|
+
Absolute path for persisting response artifacts and message history.
|
|
247
|
+
|
|
248
|
+
Raises
|
|
249
|
+
------
|
|
250
|
+
RuntimeError
|
|
251
|
+
If data_path_fn or name were not provided during initialization.
|
|
252
|
+
|
|
253
|
+
Examples
|
|
254
|
+
--------
|
|
255
|
+
>>> response.data_path
|
|
256
|
+
PosixPath('/data/myapp/baseresponse/session_123')
|
|
207
257
|
"""
|
|
208
|
-
if self._data_path_fn is None or self.
|
|
258
|
+
if self._data_path_fn is None or self._name is None:
|
|
209
259
|
raise RuntimeError(
|
|
210
|
-
"data_path_fn and
|
|
260
|
+
"data_path_fn and name are required to build data paths."
|
|
211
261
|
)
|
|
212
|
-
base_path = self._data_path_fn(self.
|
|
262
|
+
base_path = self._data_path_fn(self._name)
|
|
213
263
|
return base_path / self.__class__.__name__.lower() / self.name
|
|
214
264
|
|
|
215
265
|
def _build_input(
|
|
216
266
|
self,
|
|
217
|
-
content:
|
|
218
|
-
attachments:
|
|
267
|
+
content: str | list[str],
|
|
268
|
+
attachments: list[str] | None = None,
|
|
219
269
|
) -> None:
|
|
220
|
-
"""
|
|
270
|
+
"""Construct input messages for the OpenAI API request.
|
|
271
|
+
|
|
272
|
+
Processes content through the optional process_content callback,
|
|
273
|
+
uploads any file attachments to vector stores, and adds all
|
|
274
|
+
messages to the conversation history.
|
|
221
275
|
|
|
222
276
|
Parameters
|
|
223
277
|
----------
|
|
224
|
-
content
|
|
278
|
+
content : str or list[str]
|
|
225
279
|
String or list of strings to include as user messages.
|
|
226
|
-
attachments
|
|
227
|
-
Optional list of file paths to upload and attach.
|
|
280
|
+
attachments : list[str] or None, default None
|
|
281
|
+
Optional list of file paths to upload and attach to the message.
|
|
282
|
+
|
|
283
|
+
Notes
|
|
284
|
+
-----
|
|
285
|
+
If attachments are provided and no user vector storage exists, this
|
|
286
|
+
method automatically creates one and adds a file_search tool to
|
|
287
|
+
the tools list.
|
|
228
288
|
"""
|
|
229
289
|
contents = ensure_list(content)
|
|
230
290
|
|
|
@@ -233,25 +293,22 @@ class ResponseBase(Generic[T]):
|
|
|
233
293
|
processed_text, content_attachments = raw_content, []
|
|
234
294
|
else:
|
|
235
295
|
processed_text, content_attachments = self._process_content(raw_content)
|
|
236
|
-
input_content:
|
|
237
|
-
|
|
238
|
-
]
|
|
296
|
+
input_content: list[ResponseInputTextParam | ResponseInputFileParam] = [
|
|
297
|
+
ResponseInputTextParam(type="input_text", text=processed_text)
|
|
298
|
+
]
|
|
239
299
|
|
|
240
300
|
all_attachments = (attachments or []) + content_attachments
|
|
241
301
|
|
|
242
302
|
for file_path in all_attachments:
|
|
243
303
|
if self._user_vector_storage is None:
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
"vector_storage_cls is required for attachments."
|
|
247
|
-
)
|
|
304
|
+
from openai_sdk_helpers.vector_storage import VectorStorage
|
|
305
|
+
|
|
248
306
|
store_name = f"{self.__class__.__name__.lower()}_{self.name}_{self.uuid}_user"
|
|
249
|
-
self._user_vector_storage =
|
|
307
|
+
self._user_vector_storage = VectorStorage(
|
|
250
308
|
store_name=store_name,
|
|
251
309
|
client=self._client,
|
|
252
310
|
model=self._model,
|
|
253
311
|
)
|
|
254
|
-
self._cleanup_user_vector_storage = True
|
|
255
312
|
user_vector_storage = cast(Any, self._user_vector_storage)
|
|
256
313
|
if not any(
|
|
257
314
|
tool.get("type") == "file_search" for tool in self._tools
|
|
@@ -263,13 +320,8 @@ class ResponseBase(Generic[T]):
|
|
|
263
320
|
}
|
|
264
321
|
)
|
|
265
322
|
else:
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
if self._system_vector_storage is not None:
|
|
269
|
-
tool["vector_store_ids"] = [
|
|
270
|
-
cast(Any, self._system_vector_storage).id,
|
|
271
|
-
user_vector_storage.id,
|
|
272
|
-
]
|
|
323
|
+
# If system vector store is attached, its ID will be in tool config
|
|
324
|
+
pass
|
|
273
325
|
user_vector_storage = cast(Any, self._user_vector_storage)
|
|
274
326
|
uploaded_file = user_vector_storage.upload_file(file_path)
|
|
275
327
|
input_content.append(
|
|
@@ -284,32 +336,43 @@ class ResponseBase(Generic[T]):
|
|
|
284
336
|
|
|
285
337
|
async def run_async(
|
|
286
338
|
self,
|
|
287
|
-
content:
|
|
288
|
-
attachments:
|
|
289
|
-
) ->
|
|
290
|
-
"""Generate a response asynchronously.
|
|
339
|
+
content: str | list[str],
|
|
340
|
+
attachments: str | list[str] | None = None,
|
|
341
|
+
) -> T | None:
|
|
342
|
+
"""Generate a response asynchronously from the OpenAI API.
|
|
343
|
+
|
|
344
|
+
Builds input messages, sends the request to OpenAI, processes any
|
|
345
|
+
tool calls with registered handlers, and optionally parses the
|
|
346
|
+
result into the configured output_structure.
|
|
291
347
|
|
|
292
348
|
Parameters
|
|
293
349
|
----------
|
|
294
|
-
content
|
|
295
|
-
Prompt text or list of texts.
|
|
296
|
-
attachments
|
|
297
|
-
Optional file path or list of paths to upload and attach.
|
|
350
|
+
content : str or list[str]
|
|
351
|
+
Prompt text or list of prompt texts to send.
|
|
352
|
+
attachments : str, list[str], or None, default None
|
|
353
|
+
Optional file path or list of file paths to upload and attach.
|
|
298
354
|
|
|
299
355
|
Returns
|
|
300
356
|
-------
|
|
301
|
-
|
|
302
|
-
Parsed response object or
|
|
357
|
+
T or None
|
|
358
|
+
Parsed response object of type output_structure, or None if
|
|
359
|
+
no structured output was produced.
|
|
303
360
|
|
|
304
361
|
Raises
|
|
305
362
|
------
|
|
306
363
|
RuntimeError
|
|
307
|
-
If the API returns no output
|
|
364
|
+
If the API returns no output.
|
|
365
|
+
If a tool handler raises an exception.
|
|
308
366
|
ValueError
|
|
309
|
-
If
|
|
367
|
+
If the API invokes a tool with no registered handler.
|
|
368
|
+
|
|
369
|
+
Examples
|
|
370
|
+
--------
|
|
371
|
+
>>> result = await response.run_async("Analyze this text")
|
|
372
|
+
>>> print(result)
|
|
310
373
|
"""
|
|
311
374
|
log(f"{self.__class__.__name__}::run_response")
|
|
312
|
-
parsed_result:
|
|
375
|
+
parsed_result: T | None = None
|
|
313
376
|
|
|
314
377
|
self._build_input(
|
|
315
378
|
content=content,
|
|
@@ -320,8 +383,8 @@ class ResponseBase(Generic[T]):
|
|
|
320
383
|
"input": self.messages.to_openai_payload(),
|
|
321
384
|
"model": self._model,
|
|
322
385
|
}
|
|
323
|
-
if self.
|
|
324
|
-
kwargs["text"] = self.
|
|
386
|
+
if not self._tools and self._output_structure is not None:
|
|
387
|
+
kwargs["text"] = self._output_structure.response_format()
|
|
325
388
|
|
|
326
389
|
if self._tools:
|
|
327
390
|
kwargs["tools"] = self._tools
|
|
@@ -387,7 +450,7 @@ class ResponseBase(Generic[T]):
|
|
|
387
450
|
log("No tool call. Parsing output_text.")
|
|
388
451
|
try:
|
|
389
452
|
output_dict = json.loads(raw_text)
|
|
390
|
-
if self._output_structure
|
|
453
|
+
if self._output_structure:
|
|
391
454
|
return self._output_structure.from_raw_input(output_dict)
|
|
392
455
|
return output_dict
|
|
393
456
|
except Exception:
|
|
@@ -398,19 +461,41 @@ class ResponseBase(Generic[T]):
|
|
|
398
461
|
|
|
399
462
|
def run_sync(
|
|
400
463
|
self,
|
|
401
|
-
content:
|
|
402
|
-
attachments:
|
|
403
|
-
) ->
|
|
404
|
-
"""
|
|
464
|
+
content: str | list[str],
|
|
465
|
+
attachments: str | list[str] | None = None,
|
|
466
|
+
) -> T | None:
|
|
467
|
+
"""Execute run_async synchronously with proper event loop handling.
|
|
468
|
+
|
|
469
|
+
Automatically detects if an event loop is already running and uses
|
|
470
|
+
a separate thread if necessary. This enables safe usage in both
|
|
471
|
+
synchronous and asynchronous contexts.
|
|
472
|
+
|
|
473
|
+
Parameters
|
|
474
|
+
----------
|
|
475
|
+
content : str or list[str]
|
|
476
|
+
Prompt text or list of prompt texts to send.
|
|
477
|
+
attachments : str, list[str], or None, default None
|
|
478
|
+
Optional file path or list of file paths to upload and attach.
|
|
479
|
+
|
|
480
|
+
Returns
|
|
481
|
+
-------
|
|
482
|
+
T or None
|
|
483
|
+
Parsed response object of type output_structure, or None.
|
|
484
|
+
|
|
485
|
+
Examples
|
|
486
|
+
--------
|
|
487
|
+
>>> result = response.run_sync("Summarize this document")
|
|
488
|
+
>>> print(result)
|
|
489
|
+
"""
|
|
405
490
|
|
|
406
|
-
async def runner() ->
|
|
491
|
+
async def runner() -> T | None:
|
|
407
492
|
return await self.run_async(content=content, attachments=attachments)
|
|
408
493
|
|
|
409
494
|
try:
|
|
410
495
|
asyncio.get_running_loop()
|
|
411
496
|
except RuntimeError:
|
|
412
497
|
return asyncio.run(runner())
|
|
413
|
-
result:
|
|
498
|
+
result: T | None = None
|
|
414
499
|
|
|
415
500
|
def _thread_func() -> None:
|
|
416
501
|
nonlocal result
|
|
@@ -423,30 +508,146 @@ class ResponseBase(Generic[T]):
|
|
|
423
508
|
|
|
424
509
|
def run_streamed(
|
|
425
510
|
self,
|
|
426
|
-
content:
|
|
427
|
-
attachments:
|
|
428
|
-
) ->
|
|
429
|
-
"""
|
|
511
|
+
content: str | list[str],
|
|
512
|
+
attachments: str | list[str] | None = None,
|
|
513
|
+
) -> T | None:
|
|
514
|
+
"""Execute run_async and await the result.
|
|
430
515
|
|
|
431
|
-
Streaming
|
|
432
|
-
awaits
|
|
516
|
+
Streaming responses are not yet fully supported, so this method
|
|
517
|
+
simply awaits run_async to provide API compatibility with agent
|
|
518
|
+
interfaces.
|
|
433
519
|
|
|
434
520
|
Parameters
|
|
435
521
|
----------
|
|
436
|
-
content
|
|
437
|
-
Prompt text or list of texts.
|
|
438
|
-
attachments
|
|
439
|
-
Optional file path or list of paths to upload and attach.
|
|
522
|
+
content : str or list[str]
|
|
523
|
+
Prompt text or list of prompt texts to send.
|
|
524
|
+
attachments : str, list[str], or None, default None
|
|
525
|
+
Optional file path or list of file paths to upload and attach.
|
|
440
526
|
|
|
441
527
|
Returns
|
|
442
528
|
-------
|
|
443
|
-
|
|
444
|
-
Parsed response object or
|
|
529
|
+
T or None
|
|
530
|
+
Parsed response object of type output_structure, or None.
|
|
531
|
+
|
|
532
|
+
Notes
|
|
533
|
+
-----
|
|
534
|
+
This method exists for API consistency but does not currently
|
|
535
|
+
provide true streaming functionality.
|
|
445
536
|
"""
|
|
446
537
|
return asyncio.run(self.run_async(content=content, attachments=attachments))
|
|
447
538
|
|
|
448
|
-
def
|
|
449
|
-
"""
|
|
539
|
+
def get_last_tool_message(self) -> ResponseMessage | None:
|
|
540
|
+
"""Return the most recent tool message from conversation history.
|
|
541
|
+
|
|
542
|
+
Returns
|
|
543
|
+
-------
|
|
544
|
+
ResponseMessage or None
|
|
545
|
+
Latest tool message, or None if no tool messages exist.
|
|
546
|
+
"""
|
|
547
|
+
return self.messages.get_last_tool_message()
|
|
548
|
+
|
|
549
|
+
def get_last_user_message(self) -> ResponseMessage | None:
|
|
550
|
+
"""Return the most recent user message from conversation history.
|
|
551
|
+
|
|
552
|
+
Returns
|
|
553
|
+
-------
|
|
554
|
+
ResponseMessage or None
|
|
555
|
+
Latest user message, or None if no user messages exist.
|
|
556
|
+
"""
|
|
557
|
+
return self.messages.get_last_user_message()
|
|
558
|
+
|
|
559
|
+
def get_last_assistant_message(self) -> ResponseMessage | None:
|
|
560
|
+
"""Return the most recent assistant message from conversation history.
|
|
561
|
+
|
|
562
|
+
Returns
|
|
563
|
+
-------
|
|
564
|
+
ResponseMessage or None
|
|
565
|
+
Latest assistant message, or None if no assistant messages exist.
|
|
566
|
+
"""
|
|
567
|
+
return self.messages.get_last_assistant_message()
|
|
568
|
+
|
|
569
|
+
@classmethod
|
|
570
|
+
def build_streamlit_config(
|
|
571
|
+
cls: type[RB],
|
|
572
|
+
*,
|
|
573
|
+
display_title: str = "Example copilot",
|
|
574
|
+
description: str | None = None,
|
|
575
|
+
system_vector_store: Sequence[str] | str | None = None,
|
|
576
|
+
preserve_vector_stores: bool = False,
|
|
577
|
+
model: str | None = None,
|
|
578
|
+
) -> StreamlitAppConfig:
|
|
579
|
+
"""Construct a StreamlitAppConfig bound to this response class.
|
|
580
|
+
|
|
581
|
+
Creates a complete Streamlit application configuration using the
|
|
582
|
+
calling class as the response builder. This enables rapid deployment
|
|
583
|
+
of chat interfaces for custom response classes.
|
|
584
|
+
|
|
585
|
+
Parameters
|
|
586
|
+
----------
|
|
587
|
+
display_title : str, default "Example copilot"
|
|
588
|
+
Title displayed at the top of the Streamlit page.
|
|
589
|
+
description : str or None, default None
|
|
590
|
+
Optional description shown beneath the title.
|
|
591
|
+
system_vector_store : Sequence[str], str, or None, default None
|
|
592
|
+
Optional vector store name(s) to attach as system context.
|
|
593
|
+
Single string or sequence of strings.
|
|
594
|
+
preserve_vector_stores : bool, default False
|
|
595
|
+
When True, skip automatic cleanup of vector stores on session close.
|
|
596
|
+
model : str or None, default None
|
|
597
|
+
Optional model identifier displayed in the chat interface.
|
|
598
|
+
|
|
599
|
+
Returns
|
|
600
|
+
-------
|
|
601
|
+
StreamlitAppConfig
|
|
602
|
+
Fully configured Streamlit application bound to this response class.
|
|
603
|
+
|
|
604
|
+
Examples
|
|
605
|
+
--------
|
|
606
|
+
>>> config = MyResponse.build_streamlit_config(
|
|
607
|
+
... display_title="My Assistant",
|
|
608
|
+
... description="A helpful AI assistant",
|
|
609
|
+
... system_vector_store=["docs", "kb"],
|
|
610
|
+
... model="gpt-4"
|
|
611
|
+
... )
|
|
612
|
+
"""
|
|
613
|
+
from openai_sdk_helpers.streamlit_app.config import StreamlitAppConfig
|
|
614
|
+
|
|
615
|
+
normalized_stores = None
|
|
616
|
+
if system_vector_store is not None:
|
|
617
|
+
normalized_stores = ensure_list(system_vector_store)
|
|
618
|
+
|
|
619
|
+
return StreamlitAppConfig(
|
|
620
|
+
response=cls,
|
|
621
|
+
display_title=display_title,
|
|
622
|
+
description=description,
|
|
623
|
+
system_vector_store=normalized_stores,
|
|
624
|
+
preserve_vector_stores=preserve_vector_stores,
|
|
625
|
+
model=model,
|
|
626
|
+
)
|
|
627
|
+
|
|
628
|
+
def save(self, filepath: str | Path | None = None) -> None:
|
|
629
|
+
"""Serialize the message history to a JSON file.
|
|
630
|
+
|
|
631
|
+
Saves the complete conversation history to disk. The target path
|
|
632
|
+
is determined by filepath parameter, save_path from initialization,
|
|
633
|
+
or data_path_fn if configured.
|
|
634
|
+
|
|
635
|
+
Parameters
|
|
636
|
+
----------
|
|
637
|
+
filepath : str, Path, or None, default None
|
|
638
|
+
Optional explicit path for the JSON file. If None, uses save_path
|
|
639
|
+
or constructs path from data_path_fn and session UUID.
|
|
640
|
+
|
|
641
|
+
Notes
|
|
642
|
+
-----
|
|
643
|
+
If no save location is configured (no filepath, save_path, or
|
|
644
|
+
data_path_fn), the save operation is silently skipped.
|
|
645
|
+
|
|
646
|
+
Examples
|
|
647
|
+
--------
|
|
648
|
+
>>> response.save("/path/to/session.json")
|
|
649
|
+
>>> response.save() # Uses configured save_path or data_path
|
|
650
|
+
"""
|
|
450
651
|
if filepath is not None:
|
|
451
652
|
target = Path(filepath)
|
|
452
653
|
elif self._save_path is not None:
|
|
@@ -455,7 +656,7 @@ class ResponseBase(Generic[T]):
|
|
|
455
656
|
else:
|
|
456
657
|
filename = f"{str(self.uuid).lower()}.json"
|
|
457
658
|
target = self._save_path / filename
|
|
458
|
-
elif self._data_path_fn is not None and self.
|
|
659
|
+
elif self._data_path_fn is not None and self._name is not None:
|
|
459
660
|
filename = f"{str(self.uuid).lower()}.json"
|
|
460
661
|
target = self.data_path / filename
|
|
461
662
|
else:
|
|
@@ -469,37 +670,74 @@ class ResponseBase(Generic[T]):
|
|
|
469
670
|
log(f"Saved messages to {target}")
|
|
470
671
|
|
|
471
672
|
def __repr__(self) -> str:
|
|
472
|
-
"""Return
|
|
673
|
+
"""Return a detailed string representation of the response session.
|
|
674
|
+
|
|
675
|
+
Returns
|
|
676
|
+
-------
|
|
677
|
+
str
|
|
678
|
+
String showing class name, model, UUID, message count, and data path.
|
|
679
|
+
"""
|
|
473
680
|
data_path = None
|
|
474
|
-
if self._data_path_fn is not None and self.
|
|
681
|
+
if self._data_path_fn is not None and self._name is not None:
|
|
475
682
|
data_path = self.data_path
|
|
476
683
|
return (
|
|
477
684
|
f"<{self.__class__.__name__}(model={self._model}, uuid={self.uuid}, "
|
|
478
685
|
f"messages={len(self.messages.messages)}, data_path={data_path}>"
|
|
479
686
|
)
|
|
480
687
|
|
|
481
|
-
def __enter__(self) ->
|
|
482
|
-
"""Enter the context manager for
|
|
688
|
+
def __enter__(self) -> BaseResponse[T]:
|
|
689
|
+
"""Enter the context manager for resource management.
|
|
690
|
+
|
|
691
|
+
Returns
|
|
692
|
+
-------
|
|
693
|
+
BaseResponse[T]
|
|
694
|
+
Self reference for use in with statements.
|
|
695
|
+
"""
|
|
483
696
|
return self
|
|
484
697
|
|
|
485
698
|
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
|
|
486
|
-
"""Exit the context manager and
|
|
699
|
+
"""Exit the context manager and clean up resources.
|
|
700
|
+
|
|
701
|
+
Parameters
|
|
702
|
+
----------
|
|
703
|
+
exc_type : type or None
|
|
704
|
+
Exception type if an exception occurred, otherwise None.
|
|
705
|
+
exc_val : Exception or None
|
|
706
|
+
Exception instance if an exception occurred, otherwise None.
|
|
707
|
+
exc_tb : traceback or None
|
|
708
|
+
Traceback object if an exception occurred, otherwise None.
|
|
709
|
+
"""
|
|
487
710
|
self.close()
|
|
488
711
|
|
|
489
712
|
def close(self) -> None:
    """Clean up session resources including vector stores.

    Persists the current message history, then performs a best-effort
    deletion of the user-scoped vector store. System vector store cleanup
    is delegated to the tool configuration, so it is intentionally not
    handled here.

    Notes
    -----
    Called automatically when the response is used as a context manager.
    Otherwise invoke it explicitly — ideally from a ``finally`` block —
    to guarantee resources are released.

    Examples
    --------
    >>> response = BaseResponse(...)
    >>> try:
    ...     result = response.run_sync("query")
    ... finally:
    ...     response.close()
    """
    log(f"Closing session {self.uuid} for {self.__class__.__name__}")
    self.save()
    # Best-effort removal of the per-user vector store, if one was created;
    # failures are logged as warnings rather than raised.
    storage = self._user_vector_storage
    try:
        if storage:
            storage.delete()
            log("User vector store deleted.")
    except Exception as exc:
        log(f"Error deleting user vector store: {exc}", level=logging.WARNING)
    # System vector store cleanup is now handled via tool configuration
    log(f"Session {self.uuid} closed.")
|