openai-sdk-helpers 0.0.8__py3-none-any.whl → 0.0.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. openai_sdk_helpers/__init__.py +66 -2
  2. openai_sdk_helpers/agent/__init__.py +8 -4
  3. openai_sdk_helpers/agent/base.py +80 -45
  4. openai_sdk_helpers/agent/config.py +6 -4
  5. openai_sdk_helpers/agent/{project_manager.py → coordination.py} +29 -45
  6. openai_sdk_helpers/agent/prompt_utils.py +7 -1
  7. openai_sdk_helpers/agent/runner.py +67 -141
  8. openai_sdk_helpers/agent/search/__init__.py +33 -0
  9. openai_sdk_helpers/agent/search/base.py +297 -0
  10. openai_sdk_helpers/agent/{vector_search.py → search/vector.py} +89 -157
  11. openai_sdk_helpers/agent/{web_search.py → search/web.py} +77 -156
  12. openai_sdk_helpers/agent/summarizer.py +29 -8
  13. openai_sdk_helpers/agent/translator.py +40 -13
  14. openai_sdk_helpers/agent/validation.py +32 -8
  15. openai_sdk_helpers/async_utils.py +132 -0
  16. openai_sdk_helpers/config.py +74 -36
  17. openai_sdk_helpers/context_manager.py +241 -0
  18. openai_sdk_helpers/enums/__init__.py +9 -1
  19. openai_sdk_helpers/enums/base.py +67 -8
  20. openai_sdk_helpers/environment.py +33 -6
  21. openai_sdk_helpers/errors.py +133 -0
  22. openai_sdk_helpers/logging_config.py +105 -0
  23. openai_sdk_helpers/prompt/__init__.py +10 -71
  24. openai_sdk_helpers/prompt/base.py +172 -0
  25. openai_sdk_helpers/response/__init__.py +35 -3
  26. openai_sdk_helpers/response/base.py +363 -210
  27. openai_sdk_helpers/response/config.py +176 -0
  28. openai_sdk_helpers/response/messages.py +56 -40
  29. openai_sdk_helpers/response/runner.py +77 -33
  30. openai_sdk_helpers/response/tool_call.py +49 -25
  31. openai_sdk_helpers/response/vector_store.py +27 -14
  32. openai_sdk_helpers/retry.py +175 -0
  33. openai_sdk_helpers/streamlit_app/__init__.py +19 -2
  34. openai_sdk_helpers/streamlit_app/app.py +114 -39
  35. openai_sdk_helpers/streamlit_app/config.py +502 -0
  36. openai_sdk_helpers/streamlit_app/streamlit_web_search.py +5 -6
  37. openai_sdk_helpers/structure/__init__.py +69 -3
  38. openai_sdk_helpers/structure/agent_blueprint.py +82 -19
  39. openai_sdk_helpers/structure/base.py +208 -93
  40. openai_sdk_helpers/structure/plan/__init__.py +15 -1
  41. openai_sdk_helpers/structure/plan/enum.py +41 -5
  42. openai_sdk_helpers/structure/plan/plan.py +101 -45
  43. openai_sdk_helpers/structure/plan/task.py +38 -6
  44. openai_sdk_helpers/structure/prompt.py +21 -2
  45. openai_sdk_helpers/structure/responses.py +52 -11
  46. openai_sdk_helpers/structure/summary.py +55 -7
  47. openai_sdk_helpers/structure/validation.py +34 -6
  48. openai_sdk_helpers/structure/vector_search.py +132 -18
  49. openai_sdk_helpers/structure/web_search.py +125 -13
  50. openai_sdk_helpers/types.py +57 -0
  51. openai_sdk_helpers/utils/__init__.py +30 -1
  52. openai_sdk_helpers/utils/core.py +168 -34
  53. openai_sdk_helpers/validation.py +302 -0
  54. openai_sdk_helpers/vector_storage/__init__.py +21 -1
  55. openai_sdk_helpers/vector_storage/cleanup.py +25 -13
  56. openai_sdk_helpers/vector_storage/storage.py +123 -64
  57. openai_sdk_helpers/vector_storage/types.py +20 -19
  58. openai_sdk_helpers-0.0.9.dist-info/METADATA +550 -0
  59. openai_sdk_helpers-0.0.9.dist-info/RECORD +66 -0
  60. openai_sdk_helpers/streamlit_app/configuration.py +0 -324
  61. openai_sdk_helpers-0.0.8.dist-info/METADATA +0 -194
  62. openai_sdk_helpers-0.0.8.dist-info/RECORD +0 -55
  63. {openai_sdk_helpers-0.0.8.dist-info → openai_sdk_helpers-0.0.9.dist-info}/WHEEL +0 -0
  64. {openai_sdk_helpers-0.0.8.dist-info → openai_sdk_helpers-0.0.9.dist-info}/licenses/LICENSE +0 -0
@@ -1,4 +1,10 @@
1
- """Base response handling for OpenAI interactions."""
1
+ """Core response management for OpenAI API interactions.
2
+
3
+ This module implements the BaseResponse class, which manages the complete
4
+ lifecycle of OpenAI API interactions including input construction, tool
5
+ execution, message history, vector store attachments, and structured output
6
+ parsing.
7
+ """
2
8
 
3
9
  from __future__ import annotations
4
10
 
@@ -14,17 +20,11 @@ from typing import (
14
20
  Any,
15
21
  Callable,
16
22
  Generic,
17
- List,
18
- Optional,
19
23
  Sequence,
20
- Tuple,
21
- Type,
22
24
  TypeVar,
23
- Union,
24
25
  cast,
25
26
  )
26
27
 
27
- from openai import OpenAI
28
28
  from openai.types.responses.response_function_tool_call import ResponseFunctionToolCall
29
29
  from openai.types.responses.response_input_file_param import ResponseInputFileParam
30
30
  from openai.types.responses.response_input_message_content_list_param import (
@@ -35,126 +35,172 @@ from openai.types.responses.response_input_text_param import ResponseInputTextPa
35
35
  from openai.types.responses.response_output_message import ResponseOutputMessage
36
36
 
37
37
  from .messages import ResponseMessage, ResponseMessages
38
+ from ..config import OpenAISettings
38
39
  from ..structure import BaseStructure
40
+ from ..types import OpenAIClient
39
41
  from ..utils import ensure_list, log
40
42
 
41
- if TYPE_CHECKING:
42
- from openai_sdk_helpers.streamlit_app.configuration import StreamlitAppConfig
43
+ if TYPE_CHECKING: # pragma: no cover - only for typing hints
44
+ from openai_sdk_helpers.streamlit_app.config import StreamlitAppConfig
43
45
 
44
46
  T = TypeVar("T", bound=BaseStructure)
45
- ToolHandler = Callable[[ResponseFunctionToolCall], Union[str, Any]]
46
- ProcessContent = Callable[[str], Tuple[str, List[str]]]
47
+ ToolHandler = Callable[[ResponseFunctionToolCall], str | Any]
48
+ ProcessContent = Callable[[str], tuple[str, list[str]]]
47
49
 
48
50
 
49
51
  RB = TypeVar("RB", bound="BaseResponse[BaseStructure]")
50
52
 
51
53
 
52
54
  class BaseResponse(Generic[T]):
53
- """Manage OpenAI interactions for structured responses.
54
-
55
- This base class handles input construction, OpenAI requests, tool calls,
56
- and optional parsing into structured output models.
55
+ """Manage OpenAI API interactions for structured responses.
56
+
57
+ Orchestrates the complete lifecycle of OpenAI API requests including
58
+ input construction, tool execution, message history management, vector
59
+ store attachments, and structured output parsing. Supports both
60
+ synchronous and asynchronous execution with automatic resource cleanup.
61
+
62
+ The class handles conversation state, tool calls with custom handlers,
63
+ file attachments via vector stores, and optional parsing into typed
64
+ structured output models. Sessions can be persisted to disk and restored.
65
+
66
+ Attributes
67
+ ----------
68
+ uuid : UUID
69
+ Unique identifier for this response session.
70
+ name : str
71
+ Lowercase class name used for path construction.
72
+ messages : ResponseMessages
73
+ Complete message history for this session.
57
74
 
58
75
  Methods
59
76
  -------
60
- run_async(content, attachments)
77
+ run_async(content, attachments=None)
61
78
  Generate a response asynchronously and return parsed output.
62
- run_sync(content, attachments)
63
- Synchronous wrapper around ``run_async``.
64
- run_streamed(content, attachments)
65
- Await ``run_async`` to mirror the agent API.
66
- build_streamlit_config(...)
67
- Construct a :class:`StreamlitAppConfig` using this class as the builder.
68
- save(filepath)
69
- Serialize the message history to disk.
79
+ run_sync(content, attachments=None)
80
+ Execute run_async synchronously with thread management.
81
+ run_streamed(content, attachments=None)
82
+ Execute run_async and await the result (streaming not yet supported).
83
+ get_last_tool_message()
84
+ Return the most recent tool message or None.
85
+ get_last_user_message()
86
+ Return the most recent user message or None.
87
+ get_last_assistant_message()
88
+ Return the most recent assistant message or None.
89
+ build_streamlit_config(**kwargs)
90
+ Construct a StreamlitAppConfig using this class as the builder.
91
+ save(filepath=None)
92
+ Serialize the message history to a JSON file.
70
93
  close()
71
- Clean up remote resources (vector stores).
94
+ Clean up remote resources including vector stores.
95
+
96
+ Examples
97
+ --------
98
+ >>> from openai_sdk_helpers import BaseResponse, OpenAISettings
99
+ >>> settings = OpenAISettings(api_key="...", default_model="gpt-4")
100
+ >>> response = BaseResponse(
101
+ ... instructions="You are a helpful assistant",
102
+ ... tools=None,
103
+ ... output_structure=None,
104
+ ... tool_handlers={},
105
+ ... openai_settings=settings
106
+ ... )
107
+ >>> result = response.run_sync("Hello, world!")
108
+ >>> response.close()
72
109
  """
73
110
 
74
111
  def __init__(
75
112
  self,
76
113
  *,
77
114
  instructions: str,
78
- tools: Optional[list],
79
- schema: Optional[Any],
80
- output_structure: Optional[Type[T]],
115
+ tools: list | None,
116
+ output_structure: type[T] | None,
81
117
  tool_handlers: dict[str, ToolHandler],
82
- process_content: Optional[ProcessContent] = None,
83
- module_name: Optional[str] = None,
84
- vector_storage_cls: Optional[type] = None,
85
- client: Optional[OpenAI] = None,
86
- model: Optional[str] = None,
87
- api_key: Optional[str] = None,
88
- attachments: Optional[Union[Tuple[str, str], list[Tuple[str, str]]]] = None,
89
- data_path_fn: Optional[Callable[[str], Path]] = None,
90
- save_path: Optional[Path | str] = None,
118
+ openai_settings: OpenAISettings,
119
+ process_content: ProcessContent | None = None,
120
+ name: str | None = None,
121
+ system_vector_store: list[str] | None = None,
122
+ data_path_fn: Callable[[str], Path] | None = None,
123
+ save_path: Path | str | None = None,
91
124
  ) -> None:
92
- """Initialize a response session.
125
+ """Initialize a response session with OpenAI configuration.
126
+
127
+ Sets up the OpenAI client, message history, vector stores, and tool
128
+ handlers for a complete response workflow. The session can optionally
129
+ be persisted to disk for later restoration.
93
130
 
94
131
  Parameters
95
132
  ----------
96
133
  instructions : str
97
- System instructions for the OpenAI response.
134
+ System instructions provided to the OpenAI API for context.
98
135
  tools : list or None
99
- Tool definitions for the OpenAI request.
100
- schema : object or None
101
- Optional response schema configuration.
136
+ Tool definitions for the OpenAI API request. Pass None for no tools.
102
137
  output_structure : type[BaseStructure] or None
103
- Structure type used to parse tool call outputs.
138
+ Structure class used to parse tool call outputs. When provided,
139
+ the schema is automatically generated using the structure's
140
+ response_format() method. Pass None for unstructured responses.
104
141
  tool_handlers : dict[str, ToolHandler]
105
- Mapping of tool names to handler callables.
106
- process_content : callable, optional
107
- Callback that cleans input text and extracts attachments.
108
- module_name : str, optional
109
- Module name used to build the data path.
110
- vector_storage_cls : type, optional
111
- Vector storage class used for file uploads.
112
- client : OpenAI or None, default=None
113
- Optional pre-initialized OpenAI client.
114
- model : str or None, default=None
115
- Optional OpenAI model name override.
116
- api_key : str or None, default=None
117
- Optional OpenAI API key override.
118
- attachments : tuple or list of tuples, optional
119
- File attachments in the form ``(file_path, tool_type)``.
120
- data_path_fn : callable or None, default=None
121
- Function that maps ``module_name`` to a base data path.
122
- save_path : Path | str or None, default=None
123
- Optional path to a directory or file for persisted messages.
142
+ Mapping from tool names to callable handlers. Each handler receives
143
+ a ResponseFunctionToolCall and returns a string or any serializable
144
+ result.
145
+ openai_settings : OpenAISettings
146
+ Fully configured OpenAI settings with API key and default model.
147
+ process_content : callable or None, default None
148
+ Optional callback that processes input text and extracts file
149
+ attachments. Must return a tuple of (processed_text, attachment_list).
150
+ name : str or None, default None
151
+ Module name used for data path construction when data_path_fn is set.
152
+ system_vector_store : list[str] or None, default None
153
+ Optional list of vector store names to attach as system context.
154
+ data_path_fn : callable or None, default None
155
+ Function mapping name to a base directory path for artifact storage.
156
+ save_path : Path, str, or None, default None
157
+ Optional path to a directory or file where message history is saved.
158
+ If a directory, files are named using the session UUID.
124
159
 
125
160
  Raises
126
161
  ------
127
162
  ValueError
128
- If API key or model is missing.
163
+ If api_key is missing from openai_settings.
164
+ If default_model is missing from openai_settings.
129
165
  RuntimeError
130
166
  If the OpenAI client fails to initialize.
167
+
168
+ Examples
169
+ --------
170
+ >>> from openai_sdk_helpers import BaseResponse, OpenAISettings
171
+ >>> settings = OpenAISettings(api_key="sk-...", default_model="gpt-4")
172
+ >>> response = BaseResponse(
173
+ ... instructions="You are helpful",
174
+ ... tools=None,
175
+ ... output_structure=None,
176
+ ... tool_handlers={},
177
+ ... openai_settings=settings
178
+ ... )
131
179
  """
132
180
  self._tool_handlers = tool_handlers
133
181
  self._process_content = process_content
134
- self._module_name = module_name
135
- self._vector_storage_cls = vector_storage_cls
182
+ self._name = name
136
183
  self._data_path_fn = data_path_fn
137
184
  self._save_path = Path(save_path) if save_path is not None else None
138
185
  self._instructions = instructions
139
186
  self._tools = tools if tools is not None else []
140
- self._schema = schema
141
187
  self._output_structure = output_structure
142
- self._cleanup_user_vector_storage = False
143
- self._cleanup_system_vector_storage = False
144
-
145
- if client is None:
146
- if api_key is None:
147
- raise ValueError("OpenAI API key is required")
148
- try:
149
- self._client = OpenAI(api_key=api_key)
150
- except Exception as exc:
151
- raise RuntimeError("Failed to initialize OpenAI client") from exc
152
- else:
153
- self._client = client
188
+ self._openai_settings = openai_settings
189
+
190
+ if not self._openai_settings.api_key:
191
+ raise ValueError("OpenAI API key is required")
154
192
 
155
- self._model = model
193
+ self._client: OpenAIClient
194
+ try:
195
+ self._client = self._openai_settings.create_client()
196
+ except Exception as exc: # pragma: no cover - defensive guard
197
+ raise RuntimeError("Failed to initialize OpenAI client") from exc
198
+
199
+ self._model = self._openai_settings.default_model
156
200
  if not self._model:
157
- raise ValueError("OpenAI model is required")
201
+ raise ValueError(
202
+ "OpenAI model is required. Set 'default_model' on OpenAISettings."
203
+ )
158
204
 
159
205
  self.uuid = uuid.uuid4()
160
206
  self.name = self.__class__.__name__.lower()
@@ -163,78 +209,82 @@ class BaseResponse(Generic[T]):
163
209
  ResponseInputTextParam(type="input_text", text=instructions)
164
210
  ]
165
211
 
166
- self._system_vector_storage: Optional[Any] = None
167
- self._user_vector_storage: Optional[Any] = None
212
+ self._user_vector_storage: Any | None = None
213
+
214
+ # New logic: system_vector_store is a list of vector store names to attach
215
+ if system_vector_store:
216
+ from .vector_store import attach_vector_store
168
217
 
169
- if attachments:
170
- if self._vector_storage_cls is None:
171
- raise RuntimeError("vector_storage_cls is required for attachments.")
172
- self.file_objects: dict[str, List[str]] = {}
173
- storage_name = f"{self.__class__.__name__.lower()}_{self.name}_system"
174
- self._system_vector_storage = self._vector_storage_cls(
175
- store_name=storage_name, client=self._client, model=self._model
218
+ attach_vector_store(
219
+ self,
220
+ system_vector_store,
221
+ api_key=(
222
+ self._client.api_key
223
+ if hasattr(self._client, "api_key")
224
+ else self._openai_settings.api_key
225
+ ),
176
226
  )
177
- self._cleanup_system_vector_storage = True
178
- system_vector_storage = cast(Any, self._system_vector_storage)
179
- for file_path, tool_type in attachments:
180
- uploaded_file = system_vector_storage.upload_file(file_path=file_path)
181
- self.file_objects.setdefault(tool_type, []).append(uploaded_file.id)
182
-
183
- self.tool_resources = {}
184
- required_tools = []
185
-
186
- for tool_type, file_ids in self.file_objects.items():
187
- required_tools.append({"type": tool_type})
188
- self.tool_resources[tool_type] = {"file_ids": file_ids}
189
- if tool_type == "file_search":
190
- self.tool_resources[tool_type]["vector_store_ids"] = [
191
- system_vector_storage.id
192
- ]
193
-
194
- existing_tool_types = {tool["type"] for tool in self._tools}
195
- for tool in required_tools:
196
- tool_type = tool["type"]
197
- if tool_type == "file_search":
198
- tool["vector_store_ids"] = [system_vector_storage.id]
199
- if tool_type not in existing_tool_types:
200
- self._tools.append(tool)
201
227
 
202
228
  self.messages = ResponseMessages()
203
229
  self.messages.add_system_message(content=system_content)
204
230
  if self._save_path is not None or (
205
- self._data_path_fn is not None and self._module_name is not None
231
+ self._data_path_fn is not None and self._name is not None
206
232
  ):
207
233
  self.save()
208
234
 
209
235
  @property
210
236
  def data_path(self) -> Path:
211
- """Return the directory used to persist artifacts for this session.
237
+ """Return the directory for persisting session artifacts.
238
+
239
+ Constructs a path using data_path_fn, name, class name, and the
240
+ session name. Both data_path_fn and name must be set during
241
+ initialization for this property to work.
212
242
 
213
243
  Returns
214
244
  -------
215
245
  Path
216
- Absolute path for persisting response artifacts.
246
+ Absolute path for persisting response artifacts and message history.
247
+
248
+ Raises
249
+ ------
250
+ RuntimeError
251
+ If data_path_fn or name were not provided during initialization.
252
+
253
+ Examples
254
+ --------
255
+ >>> response.data_path
256
+ PosixPath('/data/myapp/baseresponse/session_123')
217
257
  """
218
- if self._data_path_fn is None or self._module_name is None:
258
+ if self._data_path_fn is None or self._name is None:
219
259
  raise RuntimeError(
220
- "data_path_fn and module_name are required to build data paths."
260
+ "data_path_fn and name are required to build data paths."
221
261
  )
222
- base_path = self._data_path_fn(self._module_name)
262
+ base_path = self._data_path_fn(self._name)
223
263
  return base_path / self.__class__.__name__.lower() / self.name
224
264
 
225
265
  def _build_input(
226
266
  self,
227
- content: Union[str, List[str]],
228
- attachments: Optional[List[str]] = None,
267
+ content: str | list[str],
268
+ attachments: list[str] | None = None,
229
269
  ) -> None:
230
- """Build the list of input messages for the OpenAI request.
270
+ """Construct input messages for the OpenAI API request.
271
+
272
+ Processes content through the optional process_content callback,
273
+ uploads any file attachments to vector stores, and adds all
274
+ messages to the conversation history.
231
275
 
232
276
  Parameters
233
277
  ----------
234
- content
278
+ content : str or list[str]
235
279
  String or list of strings to include as user messages.
236
- attachments
237
- Optional list of file paths to upload and attach.
280
+ attachments : list[str] or None, default None
281
+ Optional list of file paths to upload and attach to the message.
282
+
283
+ Notes
284
+ -----
285
+ If attachments are provided and no user vector storage exists, this
286
+ method automatically creates one and adds a file_search tool to
287
+ the tools list.
238
288
  """
239
289
  contents = ensure_list(content)
240
290
 
@@ -243,25 +293,22 @@ class BaseResponse(Generic[T]):
243
293
  processed_text, content_attachments = raw_content, []
244
294
  else:
245
295
  processed_text, content_attachments = self._process_content(raw_content)
246
- input_content: List[
247
- Union[ResponseInputTextParam, ResponseInputFileParam]
248
- ] = [ResponseInputTextParam(type="input_text", text=processed_text)]
296
+ input_content: list[ResponseInputTextParam | ResponseInputFileParam] = [
297
+ ResponseInputTextParam(type="input_text", text=processed_text)
298
+ ]
249
299
 
250
300
  all_attachments = (attachments or []) + content_attachments
251
301
 
252
302
  for file_path in all_attachments:
253
303
  if self._user_vector_storage is None:
254
- if self._vector_storage_cls is None:
255
- raise RuntimeError(
256
- "vector_storage_cls is required for attachments."
257
- )
304
+ from openai_sdk_helpers.vector_storage import VectorStorage
305
+
258
306
  store_name = f"{self.__class__.__name__.lower()}_{self.name}_{self.uuid}_user"
259
- self._user_vector_storage = self._vector_storage_cls(
307
+ self._user_vector_storage = VectorStorage(
260
308
  store_name=store_name,
261
309
  client=self._client,
262
310
  model=self._model,
263
311
  )
264
- self._cleanup_user_vector_storage = True
265
312
  user_vector_storage = cast(Any, self._user_vector_storage)
266
313
  if not any(
267
314
  tool.get("type") == "file_search" for tool in self._tools
@@ -273,13 +320,8 @@ class BaseResponse(Generic[T]):
273
320
  }
274
321
  )
275
322
  else:
276
- for tool in self._tools:
277
- if tool.get("type") == "file_search":
278
- if self._system_vector_storage is not None:
279
- tool["vector_store_ids"] = [
280
- cast(Any, self._system_vector_storage).id,
281
- user_vector_storage.id,
282
- ]
323
+ # If system vector store is attached, its ID will be in tool config
324
+ pass
283
325
  user_vector_storage = cast(Any, self._user_vector_storage)
284
326
  uploaded_file = user_vector_storage.upload_file(file_path)
285
327
  input_content.append(
@@ -294,32 +336,43 @@ class BaseResponse(Generic[T]):
294
336
 
295
337
  async def run_async(
296
338
  self,
297
- content: Union[str, List[str]],
298
- attachments: Optional[Union[str, List[str]]] = None,
299
- ) -> Optional[T]:
300
- """Generate a response asynchronously.
339
+ content: str | list[str],
340
+ attachments: str | list[str] | None = None,
341
+ ) -> T | None:
342
+ """Generate a response asynchronously from the OpenAI API.
343
+
344
+ Builds input messages, sends the request to OpenAI, processes any
345
+ tool calls with registered handlers, and optionally parses the
346
+ result into the configured output_structure.
301
347
 
302
348
  Parameters
303
349
  ----------
304
- content
305
- Prompt text or list of texts.
306
- attachments
307
- Optional file path or list of paths to upload and attach.
350
+ content : str or list[str]
351
+ Prompt text or list of prompt texts to send.
352
+ attachments : str, list[str], or None, default None
353
+ Optional file path or list of file paths to upload and attach.
308
354
 
309
355
  Returns
310
356
  -------
311
- Optional[T]
312
- Parsed response object or ``None``.
357
+ T or None
358
+ Parsed response object of type output_structure, or None if
359
+ no structured output was produced.
313
360
 
314
361
  Raises
315
362
  ------
316
363
  RuntimeError
317
- If the API returns no output or a tool handler errors.
364
+ If the API returns no output.
365
+ If a tool handler raises an exception.
318
366
  ValueError
319
- If no handler is found for a tool invoked by the API.
367
+ If the API invokes a tool with no registered handler.
368
+
369
+ Examples
370
+ --------
371
+ >>> result = await response.run_async("Analyze this text")
372
+ >>> print(result)
320
373
  """
321
374
  log(f"{self.__class__.__name__}::run_response")
322
- parsed_result: Optional[T] = None
375
+ parsed_result: T | None = None
323
376
 
324
377
  self._build_input(
325
378
  content=content,
@@ -330,8 +383,8 @@ class BaseResponse(Generic[T]):
330
383
  "input": self.messages.to_openai_payload(),
331
384
  "model": self._model,
332
385
  }
333
- if self._schema is not None:
334
- kwargs["text"] = self._schema
386
+ if not self._tools and self._output_structure is not None:
387
+ kwargs["text"] = self._output_structure.response_format()
335
388
 
336
389
  if self._tools:
337
390
  kwargs["tools"] = self._tools
@@ -397,7 +450,7 @@ class BaseResponse(Generic[T]):
397
450
  log("No tool call. Parsing output_text.")
398
451
  try:
399
452
  output_dict = json.loads(raw_text)
400
- if self._output_structure and self._schema:
453
+ if self._output_structure:
401
454
  return self._output_structure.from_raw_input(output_dict)
402
455
  return output_dict
403
456
  except Exception:
@@ -408,19 +461,41 @@ class BaseResponse(Generic[T]):
408
461
 
409
462
  def run_sync(
410
463
  self,
411
- content: Union[str, List[str]],
412
- attachments: Optional[Union[str, List[str]]] = None,
413
- ) -> Optional[T]:
414
- """Run :meth:`run_response_async` synchronously."""
464
+ content: str | list[str],
465
+ attachments: str | list[str] | None = None,
466
+ ) -> T | None:
467
+ """Execute run_async synchronously with proper event loop handling.
468
+
469
+ Automatically detects if an event loop is already running and uses
470
+ a separate thread if necessary. This enables safe usage in both
471
+ synchronous and asynchronous contexts.
472
+
473
+ Parameters
474
+ ----------
475
+ content : str or list[str]
476
+ Prompt text or list of prompt texts to send.
477
+ attachments : str, list[str], or None, default None
478
+ Optional file path or list of file paths to upload and attach.
479
+
480
+ Returns
481
+ -------
482
+ T or None
483
+ Parsed response object of type output_structure, or None.
484
+
485
+ Examples
486
+ --------
487
+ >>> result = response.run_sync("Summarize this document")
488
+ >>> print(result)
489
+ """
415
490
 
416
- async def runner() -> Optional[T]:
491
+ async def runner() -> T | None:
417
492
  return await self.run_async(content=content, attachments=attachments)
418
493
 
419
494
  try:
420
495
  asyncio.get_running_loop()
421
496
  except RuntimeError:
422
497
  return asyncio.run(runner())
423
- result: Optional[T] = None
498
+ result: T | None = None
424
499
 
425
500
  def _thread_func() -> None:
426
501
  nonlocal result
@@ -433,55 +508,61 @@ class BaseResponse(Generic[T]):
433
508
 
434
509
  def run_streamed(
435
510
  self,
436
- content: Union[str, List[str]],
437
- attachments: Optional[Union[str, List[str]]] = None,
438
- ) -> Optional[T]:
439
- """Generate a response asynchronously and return the awaited result.
511
+ content: str | list[str],
512
+ attachments: str | list[str] | None = None,
513
+ ) -> T | None:
514
+ """Execute run_async and await the result.
440
515
 
441
- Streaming is not yet supported for responses, so this helper simply
442
- awaits :meth:`run_async` to mirror the agent API.
516
+ Streaming responses are not yet fully supported, so this method
517
+ simply awaits run_async to provide API compatibility with agent
518
+ interfaces.
443
519
 
444
520
  Parameters
445
521
  ----------
446
- content
447
- Prompt text or list of texts.
448
- attachments
449
- Optional file path or list of paths to upload and attach.
522
+ content : str or list[str]
523
+ Prompt text or list of prompt texts to send.
524
+ attachments : str, list[str], or None, default None
525
+ Optional file path or list of file paths to upload and attach.
450
526
 
451
527
  Returns
452
528
  -------
453
- Optional[T]
454
- Parsed response object or ``None``.
529
+ T or None
530
+ Parsed response object of type output_structure, or None.
531
+
532
+ Notes
533
+ -----
534
+ This method exists for API consistency but does not currently
535
+ provide true streaming functionality.
455
536
  """
456
537
  return asyncio.run(self.run_async(content=content, attachments=attachments))
457
538
 
458
539
  def get_last_tool_message(self) -> ResponseMessage | None:
459
- """Return the most recent tool message.
540
+ """Return the most recent tool message from conversation history.
460
541
 
461
542
  Returns
462
543
  -------
463
544
  ResponseMessage or None
464
- Latest tool message or ``None`` when absent.
545
+ Latest tool message, or None if no tool messages exist.
465
546
  """
466
547
  return self.messages.get_last_tool_message()
467
548
 
468
549
  def get_last_user_message(self) -> ResponseMessage | None:
469
- """Return the most recent user message.
550
+ """Return the most recent user message from conversation history.
470
551
 
471
552
  Returns
472
553
  -------
473
554
  ResponseMessage or None
474
- Latest user message or ``None`` when absent.
555
+ Latest user message, or None if no user messages exist.
475
556
  """
476
557
  return self.messages.get_last_user_message()
477
558
 
478
559
  def get_last_assistant_message(self) -> ResponseMessage | None:
479
- """Return the most recent assistant message.
560
+ """Return the most recent assistant message from conversation history.
480
561
 
481
562
  Returns
482
563
  -------
483
564
  ResponseMessage or None
484
- Latest assistant message or ``None`` when absent.
565
+ Latest assistant message, or None if no assistant messages exist.
485
566
  """
486
567
  return self.messages.get_last_assistant_message()
487
568
 
@@ -494,28 +575,42 @@ class BaseResponse(Generic[T]):
494
575
  system_vector_store: Sequence[str] | str | None = None,
495
576
  preserve_vector_stores: bool = False,
496
577
  model: str | None = None,
497
- ) -> "StreamlitAppConfig":
498
- """Construct a :class:`StreamlitAppConfig` using ``cls`` as the builder.
578
+ ) -> StreamlitAppConfig:
579
+ """Construct a StreamlitAppConfig bound to this response class.
580
+
581
+ Creates a complete Streamlit application configuration using the
582
+ calling class as the response builder. This enables rapid deployment
583
+ of chat interfaces for custom response classes.
499
584
 
500
585
  Parameters
501
586
  ----------
502
- display_title : str, default="Example copilot"
587
+ display_title : str, default "Example copilot"
503
588
  Title displayed at the top of the Streamlit page.
504
- description : str or None, default=None
505
- Optional short description shown beneath the title.
506
- system_vector_store : Sequence[str] | str | None, default=None
507
- Optional vector store names to attach as system context.
508
- preserve_vector_stores : bool, default=False
509
- When ``True``, skip automatic vector store cleanup on close.
510
- model : str or None, default=None
511
- Optional model hint for display alongside the chat interface.
589
+ description : str or None, default None
590
+ Optional description shown beneath the title.
591
+ system_vector_store : Sequence[str], str, or None, default None
592
+ Optional vector store name(s) to attach as system context.
593
+ Single string or sequence of strings.
594
+ preserve_vector_stores : bool, default False
595
+ When True, skip automatic cleanup of vector stores on session close.
596
+ model : str or None, default None
597
+ Optional model identifier displayed in the chat interface.
512
598
 
513
599
  Returns
514
600
  -------
515
601
  StreamlitAppConfig
516
- Validated configuration bound to ``cls`` as the response builder.
602
+ Fully configured Streamlit application bound to this response class.
603
+
604
+ Examples
605
+ --------
606
+ >>> config = MyResponse.build_streamlit_config(
607
+ ... display_title="My Assistant",
608
+ ... description="A helpful AI assistant",
609
+ ... system_vector_store=["docs", "kb"],
610
+ ... model="gpt-4"
611
+ ... )
517
612
  """
518
- from openai_sdk_helpers.streamlit_app.configuration import StreamlitAppConfig
613
+ from openai_sdk_helpers.streamlit_app.config import StreamlitAppConfig
519
614
 
520
615
  normalized_stores = None
521
616
  if system_vector_store is not None:
@@ -530,8 +625,29 @@ class BaseResponse(Generic[T]):
530
625
  model=model,
531
626
  )
532
627
 
533
- def save(self, filepath: Optional[str | Path] = None) -> None:
534
- """Serialize the message history to a JSON file."""
628
+ def save(self, filepath: str | Path | None = None) -> None:
629
+ """Serialize the message history to a JSON file.
630
+
631
+ Saves the complete conversation history to disk. The target path
632
+ is determined by filepath parameter, save_path from initialization,
633
+ or data_path_fn if configured.
634
+
635
+ Parameters
636
+ ----------
637
+ filepath : str, Path, or None, default None
638
+ Optional explicit path for the JSON file. If None, uses save_path
639
+ or constructs path from data_path_fn and session UUID.
640
+
641
+ Notes
642
+ -----
643
+ If no save location is configured (no filepath, save_path, or
644
+ data_path_fn), the save operation is silently skipped.
645
+
646
+ Examples
647
+ --------
648
+ >>> response.save("/path/to/session.json")
649
+ >>> response.save() # Uses configured save_path or data_path
650
+ """
535
651
  if filepath is not None:
536
652
  target = Path(filepath)
537
653
  elif self._save_path is not None:
@@ -540,7 +656,7 @@ class BaseResponse(Generic[T]):
540
656
  else:
541
657
  filename = f"{str(self.uuid).lower()}.json"
542
658
  target = self._save_path / filename
543
- elif self._data_path_fn is not None and self._module_name is not None:
659
+ elif self._data_path_fn is not None and self._name is not None:
544
660
  filename = f"{str(self.uuid).lower()}.json"
545
661
  target = self.data_path / filename
546
662
  else:
@@ -554,37 +670,74 @@ class BaseResponse(Generic[T]):
554
670
  log(f"Saved messages to {target}")
555
671
 
556
672
  def __repr__(self) -> str:
557
- """Return an unambiguous representation including model and UUID."""
673
+ """Return a detailed string representation of the response session.
674
+
675
+ Returns
676
+ -------
677
+ str
678
+ String showing class name, model, UUID, message count, and data path.
679
+ """
558
680
  data_path = None
559
- if self._data_path_fn is not None and self._module_name is not None:
681
+ if self._data_path_fn is not None and self._name is not None:
560
682
  data_path = self.data_path
561
683
  return (
562
684
  f"<{self.__class__.__name__}(model={self._model}, uuid={self.uuid}, "
563
685
  f"messages={len(self.messages.messages)}, data_path={data_path}>"
564
686
  )
565
687
 
566
- def __enter__(self) -> "BaseResponse[T]":
567
- """Enter the context manager for this response session."""
688
+ def __enter__(self) -> BaseResponse[T]:
689
+ """Enter the context manager for resource management.
690
+
691
+ Returns
692
+ -------
693
+ BaseResponse[T]
694
+ Self reference for use in with statements.
695
+ """
568
696
  return self
569
697
 
570
698
  def __exit__(self, exc_type, exc_val, exc_tb) -> None:
571
- """Exit the context manager and close remote resources."""
699
+ """Exit the context manager and clean up resources.
700
+
701
+ Parameters
702
+ ----------
703
+ exc_type : type or None
704
+ Exception type if an exception occurred, otherwise None.
705
+ exc_val : Exception or None
706
+ Exception instance if an exception occurred, otherwise None.
707
+ exc_tb : traceback or None
708
+ Traceback object if an exception occurred, otherwise None.
709
+ """
572
710
  self.close()
573
711
 
574
712
  def close(self) -> None:
575
- """Delete managed vector stores and clean up the session."""
713
+ """Clean up session resources including vector stores.
714
+
715
+ Saves the current message history and deletes managed vector stores.
716
+ User vector stores are always cleaned up. System vector store cleanup
717
+ is handled via tool configuration.
718
+
719
+ Notes
720
+ -----
721
+ This method is automatically called when using the response as a
722
+ context manager. Always call close() or use a with statement to
723
+ ensure proper resource cleanup.
724
+
725
+ Examples
726
+ --------
727
+ >>> response = BaseResponse(...)
728
+ >>> try:
729
+ ... result = response.run_sync("query")
730
+ ... finally:
731
+ ... response.close()
732
+ """
576
733
  log(f"Closing session {self.uuid} for {self.__class__.__name__}")
577
734
  self.save()
735
+ # Always clean user vector storage if it exists
578
736
  try:
579
- if self._user_vector_storage and self._cleanup_user_vector_storage:
737
+ if self._user_vector_storage:
580
738
  self._user_vector_storage.delete()
581
739
  log("User vector store deleted.")
582
740
  except Exception as exc:
583
741
  log(f"Error deleting user vector store: {exc}", level=logging.WARNING)
584
- try:
585
- if self._system_vector_storage and self._cleanup_system_vector_storage:
586
- self._system_vector_storage.delete()
587
- log("System vector store deleted.")
588
- except Exception as exc:
589
- log(f"Error deleting system vector store: {exc}", level=logging.WARNING)
742
+ # System vector store cleanup is now handled via tool configuration
590
743
  log(f"Session {self.uuid} closed.")