openai-sdk-helpers 0.3.0__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57):
  1. openai_sdk_helpers/__init__.py +6 -6
  2. openai_sdk_helpers/agent/__init__.py +2 -2
  3. openai_sdk_helpers/agent/base.py +231 -110
  4. openai_sdk_helpers/agent/config.py +83 -29
  5. openai_sdk_helpers/agent/coordination.py +64 -28
  6. openai_sdk_helpers/agent/runner.py +16 -15
  7. openai_sdk_helpers/agent/search/base.py +94 -45
  8. openai_sdk_helpers/agent/search/vector.py +86 -58
  9. openai_sdk_helpers/agent/search/web.py +71 -40
  10. openai_sdk_helpers/agent/summarizer.py +32 -7
  11. openai_sdk_helpers/agent/translator.py +57 -24
  12. openai_sdk_helpers/agent/validation.py +34 -4
  13. openai_sdk_helpers/cli.py +42 -0
  14. openai_sdk_helpers/config.py +0 -1
  15. openai_sdk_helpers/environment.py +3 -2
  16. openai_sdk_helpers/files_api.py +35 -3
  17. openai_sdk_helpers/prompt/base.py +6 -0
  18. openai_sdk_helpers/response/__init__.py +3 -3
  19. openai_sdk_helpers/response/base.py +142 -73
  20. openai_sdk_helpers/response/config.py +43 -51
  21. openai_sdk_helpers/response/files.py +5 -5
  22. openai_sdk_helpers/response/messages.py +3 -3
  23. openai_sdk_helpers/response/runner.py +7 -7
  24. openai_sdk_helpers/response/tool_call.py +94 -4
  25. openai_sdk_helpers/response/vector_store.py +3 -3
  26. openai_sdk_helpers/streamlit_app/app.py +16 -16
  27. openai_sdk_helpers/streamlit_app/config.py +38 -37
  28. openai_sdk_helpers/streamlit_app/streamlit_web_search.py +2 -2
  29. openai_sdk_helpers/structure/__init__.py +6 -2
  30. openai_sdk_helpers/structure/agent_blueprint.py +2 -2
  31. openai_sdk_helpers/structure/base.py +8 -99
  32. openai_sdk_helpers/structure/plan/plan.py +2 -2
  33. openai_sdk_helpers/structure/plan/task.py +9 -9
  34. openai_sdk_helpers/structure/prompt.py +2 -2
  35. openai_sdk_helpers/structure/responses.py +15 -15
  36. openai_sdk_helpers/structure/summary.py +3 -3
  37. openai_sdk_helpers/structure/translation.py +32 -0
  38. openai_sdk_helpers/structure/validation.py +2 -2
  39. openai_sdk_helpers/structure/vector_search.py +7 -7
  40. openai_sdk_helpers/structure/web_search.py +6 -6
  41. openai_sdk_helpers/tools.py +41 -15
  42. openai_sdk_helpers/utils/__init__.py +19 -5
  43. openai_sdk_helpers/utils/json/__init__.py +55 -0
  44. openai_sdk_helpers/utils/json/base_model.py +181 -0
  45. openai_sdk_helpers/utils/{json_utils.py → json/data_class.py} +33 -68
  46. openai_sdk_helpers/utils/json/ref.py +113 -0
  47. openai_sdk_helpers/utils/json/utils.py +203 -0
  48. openai_sdk_helpers/utils/output_validation.py +21 -1
  49. openai_sdk_helpers/utils/path_utils.py +34 -1
  50. openai_sdk_helpers/utils/registry.py +17 -6
  51. openai_sdk_helpers/vector_storage/storage.py +10 -0
  52. {openai_sdk_helpers-0.3.0.dist-info → openai_sdk_helpers-0.4.0.dist-info}/METADATA +7 -7
  53. openai_sdk_helpers-0.4.0.dist-info/RECORD +86 -0
  54. openai_sdk_helpers-0.3.0.dist-info/RECORD +0 -81
  55. {openai_sdk_helpers-0.3.0.dist-info → openai_sdk_helpers-0.4.0.dist-info}/WHEEL +0 -0
  56. {openai_sdk_helpers-0.3.0.dist-info → openai_sdk_helpers-0.4.0.dist-info}/entry_points.txt +0 -0
  57. {openai_sdk_helpers-0.3.0.dist-info → openai_sdk_helpers-0.4.0.dist-info}/licenses/LICENSE +0 -0
@@ -5,17 +5,16 @@ from __future__ import annotations
5
5
  from dataclasses import dataclass
6
6
  from pathlib import Path
7
7
  from typing import Generic, Optional, Sequence, Type, TypeVar
8
- from openai.types.responses.response_text_config_param import ResponseTextConfigParam
9
8
 
10
9
  from ..config import OpenAISettings
11
- from ..structure.base import BaseStructure
12
- from ..response.base import BaseResponse, ToolHandler
13
- from ..utils import JSONSerializable
10
+ from ..structure.base import StructureBase
11
+ from ..response.base import ResponseBase, ToolHandler
12
+ from ..utils.json.data_class import DataclassJSONSerializable
14
13
  from ..utils.registry import BaseRegistry
15
14
  from ..utils.instructions import resolve_instructions_from_path
16
15
 
17
- TIn = TypeVar("TIn", bound="BaseStructure")
18
- TOut = TypeVar("TOut", bound="BaseStructure")
16
+ TIn = TypeVar("TIn", bound="StructureBase")
17
+ TOut = TypeVar("TOut", bound="StructureBase")
19
18
 
20
19
 
21
20
  class ResponseRegistry(BaseRegistry["ResponseConfiguration"]):
@@ -43,10 +42,6 @@ class ResponseRegistry(BaseRegistry["ResponseConfiguration"]):
43
42
  pass
44
43
 
45
44
 
46
- # Global default registry instance
47
- _default_registry = ResponseRegistry()
48
-
49
-
50
45
  def get_default_registry() -> ResponseRegistry:
51
46
  """Return the global default registry instance.
52
47
 
@@ -65,13 +60,13 @@ def get_default_registry() -> ResponseRegistry:
65
60
 
66
61
 
67
62
  @dataclass(frozen=True, slots=True)
68
- class ResponseConfiguration(JSONSerializable, Generic[TIn, TOut]):
63
+ class ResponseConfiguration(DataclassJSONSerializable, Generic[TIn, TOut]):
69
64
  """
70
65
  Represent an immutable configuration describing input and output structures.
71
66
 
72
67
  Encapsulate all metadata required to define how a request is interpreted and
73
68
  how a response is structured, while enforcing strict type and runtime safety.
74
- Inherits from JSONSerializable to support serialization to JSON format.
69
+ Inherits from DataclassJSONSerializable to support serialization to JSON format.
75
70
 
76
71
  Parameters
77
72
  ----------
@@ -82,13 +77,13 @@ class ResponseConfiguration(JSONSerializable, Generic[TIn, TOut]):
82
77
  contents are loaded at runtime.
83
78
  tools : Sequence[object], optional
84
79
  Tool definitions associated with the configuration. Default is None.
85
- input_structure : Type[BaseStructure], optional
80
+ input_structure : Type[StructureBase], optional
86
81
  Structure class used to parse or validate input. Must subclass
87
- BaseStructure. Default is None.
88
- output_structure : Type[BaseStructure], optional
82
+ StructureBase. Default is None.
83
+ output_structure : Type[StructureBase], optional
89
84
  Structure class used to format or validate output. Schema is
90
85
  automatically generated from this structure. Must subclass
91
- BaseStructure. Default is None.
86
+ StructureBase. Default is None.
92
87
  system_vector_store : list[str], optional
93
88
  Optional list of vector store names to attach as system context.
94
89
  Default is None.
@@ -103,7 +98,7 @@ class ResponseConfiguration(JSONSerializable, Generic[TIn, TOut]):
103
98
  If instructions is not a string or Path.
104
99
  If tools is provided and is not a sequence.
105
100
  If input_structure or output_structure is not a class.
106
- If input_structure or output_structure does not subclass BaseStructure.
101
+ If input_structure or output_structure does not subclass StructureBase.
107
102
  ValueError
108
103
  If instructions is a string that is empty or only whitespace.
109
104
  FileNotFoundError
@@ -112,7 +107,7 @@ class ResponseConfiguration(JSONSerializable, Generic[TIn, TOut]):
112
107
  Methods
113
108
  -------
114
109
  __post_init__()
115
- Validate configuration invariants and enforce BaseStructure subclassing.
110
+ Validate configuration invariants and enforce StructureBase subclassing.
116
111
  instructions_text
117
112
  Return the resolved instruction content as a string.
118
113
  to_json()
@@ -142,14 +137,14 @@ class ResponseConfiguration(JSONSerializable, Generic[TIn, TOut]):
142
137
  input_structure: Optional[Type[TIn]]
143
138
  output_structure: Optional[Type[TOut]]
144
139
  system_vector_store: Optional[list[str]] = None
145
- data_path: Optional[Path | str] = None
140
+ add_output_instructions: bool = True
146
141
 
147
142
  def __post_init__(self) -> None:
148
143
  """
149
144
  Validate configuration invariants after initialization.
150
145
 
151
146
  Enforce non-empty naming, correct typing of structures, and ensure that
152
- any declared structure subclasses BaseStructure.
147
+ any declared structure subclasses StructureBase.
153
148
 
154
149
  Raises
155
150
  ------
@@ -157,7 +152,7 @@ class ResponseConfiguration(JSONSerializable, Generic[TIn, TOut]):
157
152
  If name is not a non-empty string.
158
153
  If tools is provided and is not a sequence.
159
154
  If input_structure or output_structure is not a class.
160
- If input_structure or output_structure does not subclass BaseStructure.
155
+ If input_structure or output_structure does not subclass StructureBase.
161
156
  """
162
157
  if not self.name or not isinstance(self.name, str):
163
158
  raise TypeError("Configuration.name must be a non-empty str")
@@ -181,10 +176,10 @@ class ResponseConfiguration(JSONSerializable, Generic[TIn, TOut]):
181
176
  continue
182
177
  if not isinstance(cls, type):
183
178
  raise TypeError(
184
- f"Configuration.{attr} must be a class (Type[BaseStructure]) or None"
179
+ f"Configuration.{attr} must be a class (Type[StructureBase]) or None"
185
180
  )
186
- if not issubclass(cls, BaseStructure):
187
- raise TypeError(f"Configuration.{attr} must subclass BaseStructure")
181
+ if not issubclass(cls, StructureBase):
182
+ raise TypeError(f"Configuration.{attr} must subclass StructureBase")
188
183
 
189
184
  if self.tools is not None and not isinstance(self.tools, Sequence):
190
185
  raise TypeError("Configuration.tools must be a Sequence or None")
@@ -198,54 +193,51 @@ class ResponseConfiguration(JSONSerializable, Generic[TIn, TOut]):
198
193
  str
199
194
  Plain-text instructions, loading template files when necessary.
200
195
  """
201
- return self._resolve_instructions()
196
+ resolved_instructions: str = resolve_instructions_from_path(self.instructions)
197
+ output_instructions = ""
198
+ if self.output_structure is not None and self.add_output_instructions:
199
+ output_instructions = self.output_structure.get_prompt(
200
+ add_enum_values=False
201
+ )
202
+ if output_instructions:
203
+ return f"{resolved_instructions}\n{output_instructions}"
202
204
 
203
- def _resolve_instructions(self) -> str:
204
- return resolve_instructions_from_path(self.instructions)
205
+ return resolved_instructions
205
206
 
206
207
  def gen_response(
207
208
  self,
209
+ *,
208
210
  openai_settings: OpenAISettings,
209
- tool_handlers: dict[str, ToolHandler] = {},
210
- add_output_instructions: bool = True,
211
- ) -> BaseResponse[TOut]:
212
- """Generate a BaseResponse instance based on the configuration.
211
+ data_path: Optional[Path] = None,
212
+ tool_handlers: dict[str, ToolHandler] | None = None,
213
+ ) -> ResponseBase[TOut]:
214
+ """Generate a ResponseBase instance based on the configuration.
213
215
 
214
216
  Parameters
215
217
  ----------
216
218
  openai_settings : OpenAISettings
217
219
  Authentication and model settings applied to the generated
218
- :class:`BaseResponse`.
220
+ :class:`ResponseBase`.
219
221
  tool_handlers : dict[str, Callable], optional
220
222
  Mapping of tool names to handler callables. Defaults to an empty
221
223
  dictionary when not provided.
222
- add_output_instructions : bool, default=True
223
- Whether to append the structured output prompt to the instructions.
224
224
 
225
225
  Returns
226
226
  -------
227
- BaseResponse[TOut]
228
- An instance of BaseResponse configured with ``openai_settings``.
227
+ ResponseBase[TOut]
228
+ An instance of ResponseBase configured with ``openai_settings``.
229
229
  """
230
- output_instructions = ""
231
- if self.output_structure is not None and add_output_instructions:
232
- output_instructions = self.output_structure.get_prompt(
233
- add_enum_values=False
234
- )
235
-
236
- instructions = (
237
- f"{self.instructions_text}\n{output_instructions}"
238
- if output_instructions
239
- else self.instructions_text
240
- )
241
-
242
- return BaseResponse[TOut](
230
+ return ResponseBase[TOut](
243
231
  name=self.name,
244
- instructions=instructions,
232
+ instructions=self.instructions_text,
245
233
  tools=self.tools,
246
234
  output_structure=self.output_structure,
247
235
  system_vector_store=self.system_vector_store,
248
- data_path=self.data_path,
236
+ data_path=data_path,
249
237
  tool_handlers=tool_handlers,
250
238
  openai_settings=openai_settings,
251
239
  )
240
+
241
+
242
+ # Global default registry instance
243
+ _default_registry = ResponseRegistry()
@@ -23,11 +23,11 @@ from openai.types.responses.response_input_image_content_param import (
23
23
  from ..utils import create_file_data_url, create_image_data_url, is_image_file, log
24
24
 
25
25
  if TYPE_CHECKING: # pragma: no cover
26
- from .base import BaseResponse
26
+ from .base import ResponseBase
27
27
 
28
28
 
29
29
  def process_files(
30
- response: BaseResponse[Any],
30
+ response: ResponseBase[Any],
31
31
  files: list[str],
32
32
  use_vector_store: bool = False,
33
33
  batch_size: int = 10,
@@ -45,7 +45,7 @@ def process_files(
45
45
 
46
46
  Parameters
47
47
  ----------
48
- response : BaseResponse[Any]
48
+ response : ResponseBase[Any]
49
49
  Response instance that will use the processed files.
50
50
  files : list[str]
51
51
  List of file paths to process.
@@ -114,7 +114,7 @@ def process_files(
114
114
 
115
115
 
116
116
  def _upload_to_vector_store(
117
- response: BaseResponse[Any], document_files: list[str]
117
+ response: ResponseBase[Any], document_files: list[str]
118
118
  ) -> list[ResponseInputFileParam]:
119
119
  """Upload documents to vector store and return file references.
120
120
 
@@ -123,7 +123,7 @@ def _upload_to_vector_store(
123
123
 
124
124
  Parameters
125
125
  ----------
126
- response : BaseResponse[Any]
126
+ response : ResponseBase[Any]
127
127
  Response instance with vector storage.
128
128
  document_files : list[str]
129
129
  List of document file paths to upload.
@@ -24,12 +24,12 @@ from openai.types.responses.response_input_param import (
24
24
  )
25
25
  from openai.types.responses.response_output_message import ResponseOutputMessage
26
26
 
27
- from ..utils import JSONSerializable
27
+ from ..utils.json.data_class import DataclassJSONSerializable
28
28
  from .tool_call import ResponseToolCall
29
29
 
30
30
 
31
31
  @dataclass
32
- class ResponseMessage(JSONSerializable):
32
+ class ResponseMessage(DataclassJSONSerializable):
33
33
  """Single message exchanged with the OpenAI API.
34
34
 
35
35
  Represents a complete message with role, content, timestamp, and
@@ -91,7 +91,7 @@ class ResponseMessage(JSONSerializable):
91
91
 
92
92
 
93
93
  @dataclass
94
- class ResponseMessages(JSONSerializable):
94
+ class ResponseMessages(DataclassJSONSerializable):
95
95
  """Collection of messages in a conversation.
96
96
 
97
97
  Manages the complete history of messages exchanged during an OpenAI
@@ -10,10 +10,10 @@ from __future__ import annotations
10
10
  import asyncio
11
11
  from typing import Any, TypeVar
12
12
 
13
- from .base import BaseResponse
13
+ from .base import ResponseBase
14
14
 
15
15
 
16
- R = TypeVar("R", bound=BaseResponse[Any])
16
+ R = TypeVar("R", bound=ResponseBase[Any])
17
17
 
18
18
 
19
19
  def run_sync(
@@ -29,7 +29,7 @@ def run_sync(
29
29
 
30
30
  Parameters
31
31
  ----------
32
- response_cls : type[BaseResponse]
32
+ response_cls : type[ResponseBase]
33
33
  Response class to instantiate for the workflow.
34
34
  content : str
35
35
  Prompt text to send to the OpenAI API.
@@ -39,7 +39,7 @@ def run_sync(
39
39
  Returns
40
40
  -------
41
41
  Any
42
- Parsed response from BaseResponse.run_sync, typically a structured
42
+ Parsed response from ResponseBase.run_sync, typically a structured
43
43
  output or None.
44
44
 
45
45
  Examples
@@ -71,7 +71,7 @@ async def run_async(
71
71
 
72
72
  Parameters
73
73
  ----------
74
- response_cls : type[BaseResponse]
74
+ response_cls : type[ResponseBase]
75
75
  Response class to instantiate for the workflow.
76
76
  content : str
77
77
  Prompt text to send to the OpenAI API.
@@ -81,7 +81,7 @@ async def run_async(
81
81
  Returns
82
82
  -------
83
83
  Any
84
- Parsed response from BaseResponse.run_async, typically a structured
84
+ Parsed response from ResponseBase.run_async, typically a structured
85
85
  output or None.
86
86
 
87
87
  Examples
@@ -114,7 +114,7 @@ def run_streamed(
114
114
 
115
115
  Parameters
116
116
  ----------
117
- response_cls : type[BaseResponse]
117
+ response_cls : type[ResponseBase]
118
118
  Response class to instantiate for the workflow.
119
119
  content : str
120
120
  Prompt text to send to the OpenAI API.
@@ -9,16 +9,18 @@ from __future__ import annotations
9
9
 
10
10
  import ast
11
11
  import json
12
+ import re
12
13
  from dataclasses import dataclass
13
14
 
14
15
  from openai.types.responses.response_function_tool_call_param import (
15
16
  ResponseFunctionToolCallParam,
16
17
  )
17
18
  from openai.types.responses.response_input_param import FunctionCallOutput
19
+ from ..utils.json.data_class import DataclassJSONSerializable
18
20
 
19
21
 
20
22
  @dataclass
21
- class ResponseToolCall:
23
+ class ResponseToolCall(DataclassJSONSerializable):
22
24
  """Container for tool call data in a conversation.
23
25
 
24
26
  Stores the complete information about a tool invocation including
@@ -94,6 +96,85 @@ class ResponseToolCall:
94
96
  return function_call, function_call_output
95
97
 
96
98
 
99
+ def _to_snake_case(name: str) -> str:
100
+ """Convert a PascalCase or camelCase string to snake_case.
101
+
102
+ Parameters
103
+ ----------
104
+ name : str
105
+ The name to convert.
106
+
107
+ Returns
108
+ -------
109
+ str
110
+ The snake_case version of the name.
111
+
112
+ Examples
113
+ --------
114
+ >>> _to_snake_case("ExampleStructure")
115
+ 'example_structure'
116
+ >>> _to_snake_case("MyToolName")
117
+ 'my_tool_name'
118
+ """
119
+ # First regex: Insert underscore before uppercase letters followed by
120
+ # lowercase letters (e.g., "Tool" in "ExampleTool" becomes "_Tool")
121
+ s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
122
+ # Second regex: Insert underscore between lowercase/digit and uppercase
123
+ # (e.g., "e3" followed by "T" becomes "e3_T")
124
+ return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
125
+
126
+
127
+ def _unwrap_arguments(parsed: dict, tool_name: str) -> dict:
128
+ """Unwrap arguments if wrapped in a single-key dict.
129
+
130
+ Some responses wrap arguments under a key matching the structure class
131
+ name (e.g., {"ExampleStructure": {...}}) or snake_case variant
132
+ (e.g., {"example_structure": {...}}). This function detects and unwraps
133
+ such wrappers to normalize the payload.
134
+
135
+ Parameters
136
+ ----------
137
+ parsed : dict
138
+ The parsed arguments dictionary.
139
+ tool_name : str
140
+ The tool name, used to match potential wrapper keys.
141
+
142
+ Returns
143
+ -------
144
+ dict
145
+ Unwrapped arguments dictionary, or original if no wrapper detected.
146
+
147
+ Examples
148
+ --------
149
+ >>> _unwrap_arguments({"ExampleTool": {"arg": "value"}}, "ExampleTool")
150
+ {'arg': 'value'}
151
+ >>> _unwrap_arguments({"example_tool": {"arg": "value"}}, "ExampleTool")
152
+ {'arg': 'value'}
153
+ >>> _unwrap_arguments({"arg": "value"}, "ExampleTool")
154
+ {'arg': 'value'}
155
+ """
156
+ # Only unwrap if dict has exactly one key
157
+ if not isinstance(parsed, dict) or len(parsed) != 1:
158
+ return parsed
159
+
160
+ wrapper_key = next(iter(parsed))
161
+ wrapped_value = parsed[wrapper_key]
162
+
163
+ # Only unwrap if the value is also a dict
164
+ if not isinstance(wrapped_value, dict):
165
+ return parsed
166
+
167
+ # Check if wrapper key matches tool name (case-insensitive or snake_case)
168
+ tool_name_lower = tool_name.lower()
169
+ tool_name_snake = _to_snake_case(tool_name)
170
+ wrapper_key_lower = wrapper_key.lower()
171
+
172
+ if wrapper_key_lower in (tool_name_lower, tool_name_snake):
173
+ return wrapped_value
174
+
175
+ return parsed
176
+
177
+
97
178
  def parse_tool_arguments(arguments: str, tool_name: str) -> dict:
98
179
  """Parse tool call arguments with fallback for malformed JSON.
99
180
 
@@ -102,6 +183,9 @@ def parse_tool_arguments(arguments: str, tool_name: str) -> dict:
102
183
  formatting issues like single quotes instead of double quotes.
103
184
  Provides clear error context including tool name and raw payload.
104
185
 
186
+ Also handles unwrapping of arguments that are wrapped in a single-key
187
+ dictionary matching the tool name (e.g., {"ExampleStructure": {...}}).
188
+
105
189
  Parameters
106
190
  ----------
107
191
  arguments : str
@@ -112,7 +196,7 @@ def parse_tool_arguments(arguments: str, tool_name: str) -> dict:
112
196
  Returns
113
197
  -------
114
198
  dict
115
- Parsed dictionary of tool arguments.
199
+ Parsed dictionary of tool arguments, with wrapper unwrapped if present.
116
200
 
117
201
  Raises
118
202
  ------
@@ -127,12 +211,15 @@ def parse_tool_arguments(arguments: str, tool_name: str) -> dict:
127
211
 
128
212
  >>> parse_tool_arguments("{'key': 'value'}", tool_name="search")
129
213
  {'key': 'value'}
214
+
215
+ >>> parse_tool_arguments('{"ExampleTool": {"arg": "value"}}', "ExampleTool")
216
+ {'arg': 'value'}
130
217
  """
131
218
  try:
132
- return json.loads(arguments)
219
+ parsed = json.loads(arguments)
133
220
  except json.JSONDecodeError:
134
221
  try:
135
- return ast.literal_eval(arguments)
222
+ parsed = ast.literal_eval(arguments)
136
223
  except Exception as exc: # noqa: BLE001
137
224
  # Build informative error message with context
138
225
  payload_preview = (
@@ -142,3 +229,6 @@ def parse_tool_arguments(arguments: str, tool_name: str) -> dict:
142
229
  f"Failed to parse tool arguments for tool '{tool_name}'. "
143
230
  f"Raw payload: {payload_preview}"
144
231
  ) from exc
232
+
233
+ # Unwrap if wrapped in a single-key dict matching tool name
234
+ return _unwrap_arguments(parsed, tool_name)
@@ -11,11 +11,11 @@ from typing import Any, Sequence
11
11
  from openai import OpenAI
12
12
 
13
13
  from ..utils import ensure_list
14
- from .base import BaseResponse
14
+ from .base import ResponseBase
15
15
 
16
16
 
17
17
  def attach_vector_store(
18
- response: BaseResponse[Any],
18
+ response: ResponseBase[Any],
19
19
  vector_stores: str | Sequence[str],
20
20
  api_key: str | None = None,
21
21
  ) -> list[str]:
@@ -27,7 +27,7 @@ def attach_vector_store(
27
27
 
28
28
  Parameters
29
29
  ----------
30
- response : BaseResponse[Any]
30
+ response : ResponseBase[Any]
31
31
  Response instance whose tool configuration will be updated.
32
32
  vector_stores : str or Sequence[str]
33
33
  Single vector store name or sequence of names to attach.
@@ -18,12 +18,12 @@ from dotenv import load_dotenv
18
18
 
19
19
  load_dotenv()
20
20
 
21
- from openai_sdk_helpers.response import BaseResponse, attach_vector_store
21
+ from openai_sdk_helpers.response import ResponseBase, attach_vector_store
22
22
  from openai_sdk_helpers.streamlit_app import (
23
23
  StreamlitAppConfig,
24
24
  _load_configuration,
25
25
  )
26
- from openai_sdk_helpers.structure.base import BaseStructure
26
+ from openai_sdk_helpers.structure.base import StructureBase
27
27
  from openai_sdk_helpers.utils import (
28
28
  coerce_jsonable,
29
29
  customJSONEncoder,
@@ -96,7 +96,7 @@ def _cleanup_temp_files(file_paths: list[str] | None = None) -> None:
96
96
  st.session_state["temp_file_paths"] = []
97
97
 
98
98
 
99
- def _extract_assistant_text(response: BaseResponse[Any]) -> str:
99
+ def _extract_assistant_text(response: ResponseBase[Any]) -> str:
100
100
  """Extract the latest assistant message as readable text.
101
101
 
102
102
  Searches the response's message history for the most recent assistant
@@ -104,7 +104,7 @@ def _extract_assistant_text(response: BaseResponse[Any]) -> str:
104
104
 
105
105
  Parameters
106
106
  ----------
107
- response : BaseResponse[Any]
107
+ response : ResponseBase[Any]
108
108
  Active response session with message history.
109
109
 
110
110
  Returns
@@ -153,7 +153,7 @@ def _extract_assistant_text(response: BaseResponse[Any]) -> str:
153
153
  return ""
154
154
 
155
155
 
156
- def _render_summary(result: Any, response: BaseResponse[Any]) -> str:
156
+ def _render_summary(result: Any, response: ResponseBase[Any]) -> str:
157
157
  """Generate display text for the chat transcript.
158
158
 
159
159
  Converts the response result into a human-readable format suitable
@@ -163,8 +163,8 @@ def _render_summary(result: Any, response: BaseResponse[Any]) -> str:
163
163
  Parameters
164
164
  ----------
165
165
  result : Any
166
- Parsed result from BaseResponse.run_sync.
167
- response : BaseResponse[Any]
166
+ Parsed result from ResponseBase.run_sync.
167
+ response : ResponseBase[Any]
168
168
  Response instance containing message history.
169
169
 
170
170
  Returns
@@ -177,7 +177,7 @@ def _render_summary(result: Any, response: BaseResponse[Any]) -> str:
177
177
  Falls back to extracting assistant text from message history if
178
178
  the result cannot be formatted directly.
179
179
  """
180
- if isinstance(result, BaseStructure):
180
+ if isinstance(result, StructureBase):
181
181
  return result.print()
182
182
  if isinstance(result, str):
183
183
  return result
@@ -196,7 +196,7 @@ def _render_summary(result: Any, response: BaseResponse[Any]) -> str:
196
196
  return "No response returned."
197
197
 
198
198
 
199
- def _build_raw_output(result: Any, response: BaseResponse[Any]) -> dict[str, Any]:
199
+ def _build_raw_output(result: Any, response: ResponseBase[Any]) -> dict[str, Any]:
200
200
  """Assemble raw JSON payload for the expandable transcript section.
201
201
 
202
202
  Creates a structured dictionary containing both the parsed result
@@ -206,7 +206,7 @@ def _build_raw_output(result: Any, response: BaseResponse[Any]) -> dict[str, Any
206
206
  ----------
207
207
  result : Any
208
208
  Parsed result from the response execution.
209
- response : BaseResponse[Any]
209
+ response : ResponseBase[Any]
210
210
  Response session with complete message history.
211
211
 
212
212
  Returns
@@ -226,8 +226,8 @@ def _build_raw_output(result: Any, response: BaseResponse[Any]) -> dict[str, Any
226
226
  }
227
227
 
228
228
 
229
- def _get_response_instance(config: StreamlitAppConfig) -> BaseResponse[Any]:
230
- """Instantiate and cache the configured BaseResponse.
229
+ def _get_response_instance(config: StreamlitAppConfig) -> ResponseBase[Any]:
230
+ """Instantiate and cache the configured ResponseBase.
231
231
 
232
232
  Creates a new response instance from the configuration if not already
233
233
  cached in session state. Applies vector store attachments and cleanup
@@ -240,13 +240,13 @@ def _get_response_instance(config: StreamlitAppConfig) -> BaseResponse[Any]:
240
240
 
241
241
  Returns
242
242
  -------
243
- BaseResponse[Any]
243
+ ResponseBase[Any]
244
244
  Active response instance for the current Streamlit session.
245
245
 
246
246
  Raises
247
247
  ------
248
248
  TypeError
249
- If the configured response cannot produce a BaseResponse.
249
+ If the configured response cannot produce a ResponseBase.
250
250
 
251
251
  Notes
252
252
  -----
@@ -255,7 +255,7 @@ def _get_response_instance(config: StreamlitAppConfig) -> BaseResponse[Any]:
255
255
  """
256
256
  if "response_instance" in st.session_state:
257
257
  cached = st.session_state["response_instance"]
258
- if isinstance(cached, BaseResponse):
258
+ if isinstance(cached, ResponseBase):
259
259
  return cached
260
260
 
261
261
  response = config.create_response()
@@ -291,7 +291,7 @@ def _reset_chat(close_response: bool = True) -> None:
291
291
  chat_history, response_instance, and temp_file_paths keys.
292
292
  """
293
293
  response = st.session_state.get("response_instance")
294
- if close_response and isinstance(response, BaseResponse):
294
+ if close_response and isinstance(response, ResponseBase):
295
295
  filepath = f"./data/{response.name}.{response.uuid}.json"
296
296
  response.save(filepath)
297
297
  response.close()