openai-sdk-helpers 0.4.2__py3-none-any.whl → 0.4.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. openai_sdk_helpers/__init__.py +6 -36
  2. openai_sdk_helpers/agent/__init__.py +3 -4
  3. openai_sdk_helpers/agent/base.py +34 -31
  4. openai_sdk_helpers/agent/{config.py → configuration.py} +13 -13
  5. openai_sdk_helpers/agent/{coordination.py → coordinator.py} +12 -10
  6. openai_sdk_helpers/agent/search/base.py +16 -16
  7. openai_sdk_helpers/agent/search/vector.py +25 -13
  8. openai_sdk_helpers/agent/search/web.py +5 -5
  9. openai_sdk_helpers/agent/summarizer.py +6 -4
  10. openai_sdk_helpers/agent/translator.py +9 -5
  11. openai_sdk_helpers/agent/{validation.py → validator.py} +6 -4
  12. openai_sdk_helpers/cli.py +8 -22
  13. openai_sdk_helpers/environment.py +8 -13
  14. openai_sdk_helpers/prompt/vector_planner.jinja +7 -0
  15. openai_sdk_helpers/prompt/vector_search.jinja +6 -0
  16. openai_sdk_helpers/prompt/vector_writer.jinja +7 -0
  17. openai_sdk_helpers/response/__init__.py +1 -1
  18. openai_sdk_helpers/response/base.py +4 -4
  19. openai_sdk_helpers/response/{config.py → configuration.py} +8 -8
  20. openai_sdk_helpers/response/planner.py +1 -1
  21. openai_sdk_helpers/response/prompter.py +1 -1
  22. openai_sdk_helpers/streamlit_app/__init__.py +1 -1
  23. openai_sdk_helpers/streamlit_app/app.py +16 -17
  24. openai_sdk_helpers/streamlit_app/{config.py → configuration.py} +13 -13
  25. openai_sdk_helpers/streamlit_app/streamlit_web_search.py +2 -2
  26. openai_sdk_helpers/types.py +3 -3
  27. openai_sdk_helpers/utils/__init__.py +2 -6
  28. openai_sdk_helpers/utils/json/base_model.py +1 -1
  29. openai_sdk_helpers/utils/json/data_class.py +1 -1
  30. openai_sdk_helpers/utils/registry.py +19 -15
  31. openai_sdk_helpers/vector_storage/storage.py +1 -1
  32. {openai_sdk_helpers-0.4.2.dist-info → openai_sdk_helpers-0.4.3.dist-info}/METADATA +8 -8
  33. {openai_sdk_helpers-0.4.2.dist-info → openai_sdk_helpers-0.4.3.dist-info}/RECORD +38 -40
  34. openai_sdk_helpers/agent/prompt_utils.py +0 -15
  35. openai_sdk_helpers/context_manager.py +0 -241
  36. openai_sdk_helpers/deprecation.py +0 -167
  37. openai_sdk_helpers/retry.py +0 -175
  38. openai_sdk_helpers/utils/deprecation.py +0 -167
  39. /openai_sdk_helpers/{logging_config.py → logging.py} +0 -0
  40. /openai_sdk_helpers/{config.py → settings.py} +0 -0
  41. {openai_sdk_helpers-0.4.2.dist-info → openai_sdk_helpers-0.4.3.dist-info}/WHEEL +0 -0
  42. {openai_sdk_helpers-0.4.2.dist-info → openai_sdk_helpers-0.4.3.dist-info}/entry_points.txt +0 -0
  43. {openai_sdk_helpers-0.4.2.dist-info → openai_sdk_helpers-0.4.3.dist-info}/licenses/LICENSE +0 -0
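Several modules are renamed in this release (agent/config.py → configuration.py, agent/coordination.py → coordinator.py, agent/validation.py → validator.py, response/config.py → configuration.py, streamlit_app/config.py → configuration.py, logging_config.py → logging.py, and the top-level config.py → settings.py). A minimal import-migration sketch for downstream code, assuming only the module paths moved and the class names are unchanged, which is what the hunks below show:

# 0.4.2 import paths (now gone):
# from openai_sdk_helpers.agent.config import AgentConfiguration
# from openai_sdk_helpers.response.config import ResponseConfiguration
# from openai_sdk_helpers.config import OpenAISettings

# 0.4.3 import paths:
from openai_sdk_helpers.agent.configuration import AgentConfiguration
from openai_sdk_helpers.response.configuration import ResponseConfiguration
from openai_sdk_helpers.settings import OpenAISettings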
openai_sdk_helpers/agent/search/vector.py CHANGED
@@ -6,7 +6,9 @@ from pathlib import Path
  from typing import Any, Callable, Dict, List, Optional

  from agents import custom_span, gen_trace_id, trace
+ from agents.model_settings import ModelSettings

+ from ...environment import DEFAULT_PROMPT_DIR
  from ...structure.prompt import PromptStructure
  from ...structure.vector_search import (
  VectorSearchItemStructure,
@@ -18,7 +20,7 @@ from ...structure.vector_search import (
  )
  from ...tools import tool_handler_factory
  from ...vector_storage import VectorStorage
- from ..config import AgentConfiguration
+ from ..configuration import AgentConfiguration
  from ..utils import run_coroutine_agent_sync
  from .base import SearchPlanner, SearchToolAgent, SearchWriter

@@ -31,9 +33,10 @@ class VectorAgentPlanner(SearchPlanner[VectorSearchPlanStructure]):
  Parameters
  ----------
  prompt_dir : Path or None, default=None
- Directory containing prompt templates.
+ Directory containing prompt templates. Defaults to the packaged
+ ``prompt`` directory when not provided.
  default_model : str or None, default=None
- Default model identifier to use when not defined in config.
+ Default model identifier to use when not defined in configuration.

  Methods
  -------
@@ -54,7 +57,8 @@ class VectorAgentPlanner(SearchPlanner[VectorSearchPlanStructure]):
  self, prompt_dir: Optional[Path] = None, default_model: Optional[str] = None
  ) -> None:
  """Initialize the planner agent."""
- super().__init__(prompt_dir=prompt_dir, default_model=default_model)
+ prompt_directory = prompt_dir or DEFAULT_PROMPT_DIR
+ super().__init__(prompt_dir=prompt_directory, default_model=default_model)

  def _configure_agent(self) -> AgentConfiguration:
  """Return configuration for the vector planner agent.
@@ -69,6 +73,7 @@ class VectorAgentPlanner(SearchPlanner[VectorSearchPlanStructure]):
  instructions="Agent instructions",
  description="Plan vector searches based on a user query.",
  output_structure=VectorSearchPlanStructure,
+ model_settings=ModelSettings(tool_choice="none"),
  )


@@ -84,9 +89,10 @@ class VectorSearchTool(
  Parameters
  ----------
  prompt_dir : Path or None, default=None
- Directory containing prompt templates.
+ Directory containing prompt templates. Defaults to the packaged
+ ``prompt`` directory when not provided.
  default_model : str or None, default=None
- Default model identifier to use when not defined in config.
+ Default model identifier to use when not defined in configuration.
  store_name : str or None, default=None
  Name of the vector store to query.
  max_concurrent_searches : int, default=MAX_CONCURRENT_SEARCHES
@@ -125,13 +131,14 @@ class VectorSearchTool(
  vector_storage_factory: Optional[Callable[[str], VectorStorage]] = None,
  ) -> None:
  """Initialize the search tool agent."""
+ prompt_directory = prompt_dir or DEFAULT_PROMPT_DIR
  self._vector_storage: Optional[VectorStorage] = None
  self._store_name = store_name or "editorial"
  self._vector_storage_factory = vector_storage_factory
  if vector_storage is not None:
  self._vector_storage = vector_storage
  super().__init__(
- prompt_dir=prompt_dir,
+ prompt_dir=prompt_directory,
  default_model=default_model,
  max_concurrent_searches=max_concurrent_searches,
  )
@@ -150,6 +157,7 @@ class VectorSearchTool(
  description="Perform vector searches based on a search plan.",
  input_structure=VectorSearchPlanStructure,
  output_structure=VectorSearchItemResultsStructure,
+ model_settings=ModelSettings(tool_choice="none"),
  )

  def _get_vector_storage(self) -> VectorStorage:
@@ -202,9 +210,10 @@ class VectorSearchWriter(SearchWriter[VectorSearchReportStructure]):
  Parameters
  ----------
  prompt_dir : Path or None, default=None
- Directory containing prompt templates.
+ Directory containing prompt templates. Defaults to the packaged
+ ``prompt`` directory when not provided.
  default_model : str or None, default=None
- Default model identifier to use when not defined in config.
+ Default model identifier to use when not defined in configuration.

  Methods
  -------
@@ -225,7 +234,8 @@ class VectorSearchWriter(SearchWriter[VectorSearchReportStructure]):
  self, prompt_dir: Optional[Path] = None, default_model: Optional[str] = None
  ) -> None:
  """Initialize the writer agent."""
- super().__init__(prompt_dir=prompt_dir, default_model=default_model)
+ prompt_directory = prompt_dir or DEFAULT_PROMPT_DIR
+ super().__init__(prompt_dir=prompt_directory, default_model=default_model)

  def _configure_agent(self) -> AgentConfiguration:
  """Return configuration for the vector writer agent.
@@ -240,6 +250,7 @@ class VectorSearchWriter(SearchWriter[VectorSearchReportStructure]):
  instructions="Agent instructions",
  description="Write a report based on search results.",
  output_structure=VectorSearchReportStructure,
+ model_settings=ModelSettings(tool_choice="none"),
  )


@@ -254,9 +265,10 @@ class VectorAgentSearch:
  Parameters
  ----------
  prompt_dir : Path or None, default=None
- Directory containing prompt templates.
+ Directory containing prompt templates. Defaults to the packaged
+ ``prompt`` directory when not provided.
  default_model : str or None, default=None
- Default model identifier to use when not defined in config.
+ Default model identifier to use when not defined in configuration.
  vector_store_name : str or None, default=None
  Name of the vector store to query.
  max_concurrent_searches : int, default=MAX_CONCURRENT_SEARCHES
@@ -318,7 +330,7 @@ class VectorAgentSearch:
  vector_storage_factory: Optional[Callable[[str], VectorStorage]] = None,
  ) -> None:
  """Create the main VectorSearch agent."""
- self._prompt_dir = prompt_dir
+ self._prompt_dir = prompt_dir or DEFAULT_PROMPT_DIR
  self._default_model = default_model
  self._vector_store_name = vector_store_name
  self._max_concurrent_searches = max_concurrent_searches
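The vector search agents above now fall back to the packaged prompt directory when prompt_dir is omitted and pass ModelSettings(tool_choice="none") in their configurations. A minimal construction sketch using only the signatures visible in this diff (running a search still requires OpenAI credentials and an existing vector store):

from openai_sdk_helpers.agent.search.vector import VectorAgentSearch

# prompt_dir may now be omitted; it defaults to the packaged ``prompt`` directory.
search = VectorAgentSearch(
    default_model="gpt-4o-mini",      # used when the configuration defines no model
    vector_store_name="editorial",    # same name the tool agent defaults to
)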
openai_sdk_helpers/agent/search/web.py CHANGED
@@ -18,7 +18,7 @@ from ...structure.web_search import (
  WebSearchReportStructure,
  )
  from ...tools import tool_handler_factory
- from ..config import AgentConfiguration
+ from ..configuration import AgentConfiguration
  from ..utils import run_coroutine_agent_sync
  from .base import SearchPlanner, SearchToolAgent, SearchWriter

@@ -33,7 +33,7 @@ class WebAgentPlanner(SearchPlanner[WebSearchPlanStructure]):
  prompt_dir : Path or None, default=None
  Directory containing prompt templates.
  default_model : str or None, default=None
- Default model identifier to use when not defined in config.
+ Default model identifier to use when not defined in configuration.

  Methods
  -------
@@ -84,7 +84,7 @@ class WebSearchToolAgent(
  prompt_dir : Path or None, default=None
  Directory containing prompt templates.
  default_model : str or None, default=None
- Default model identifier to use when not defined in config.
+ Default model identifier to use when not defined in configuration.

  Methods
  -------
@@ -189,7 +189,7 @@ class WebAgentWriter(SearchWriter[WebSearchReportStructure]):
  prompt_dir : Path or None, default=None
  Directory containing prompt templates.
  default_model : str or None, default=None
- Default model identifier to use when not defined in config.
+ Default model identifier to use when not defined in configuration.

  Methods
  -------
@@ -236,7 +236,7 @@ class WebAgentSearch:
  prompt_dir : Path or None, default=None
  Directory containing prompt templates.
  default_model : str or None, default=None
- Default model identifier to use when not defined in config.
+ Default model identifier to use when not defined in configuration.

  Methods
  -------
openai_sdk_helpers/agent/summarizer.py CHANGED
@@ -8,8 +8,8 @@ from typing import Any, Dict, Optional, Type
  from ..structure import SummaryStructure
  from ..structure.base import StructureBase
  from .base import AgentBase
- from .config import AgentConfiguration
- from .prompt_utils import DEFAULT_PROMPT_DIR
+ from .configuration import AgentConfiguration
+ from ..environment import DEFAULT_PROMPT_DIR


  class SummarizerAgent(AgentBase):
@@ -84,7 +84,7 @@ class SummarizerAgent(AgentBase):
  --------
  >>> summarizer = SummarizerAgent(default_model="gpt-4o-mini")
  """
- config = AgentConfiguration(
+ configuration = AgentConfiguration(
  name="summarizer",
  instructions="Agent instructions",
  description="Summarize passages into concise findings.",
@@ -92,7 +92,9 @@ class SummarizerAgent(AgentBase):
  )
  prompt_directory = prompt_dir or DEFAULT_PROMPT_DIR
  super().__init__(
- config=config, prompt_dir=prompt_directory, default_model=default_model
+ configuration=configuration,
+ prompt_dir=prompt_directory,
+ default_model=default_model,
  )

  async def run_agent(
openai_sdk_helpers/agent/translator.py CHANGED
@@ -5,11 +5,13 @@ from __future__ import annotations
  from pathlib import Path
  from typing import Any, Dict, Optional

- from .base import AgentBase
- from .config import AgentConfiguration
- from .prompt_utils import DEFAULT_PROMPT_DIR
+
  from ..structure import TranslationStructure
  from ..structure.base import StructureBase
+ from ..environment import DEFAULT_PROMPT_DIR
+
+ from .base import AgentBase
+ from .configuration import AgentConfiguration


  class TranslatorAgent(AgentBase):
@@ -82,7 +84,7 @@ class TranslatorAgent(AgentBase):
  --------
  >>> translator = TranslatorAgent(default_model="gpt-4o-mini")
  """
- config = AgentConfiguration(
+ configuration = AgentConfiguration(
  name="translator",
  instructions="Agent instructions",
  description="Translate text into the requested language.",
@@ -90,7 +92,9 @@ class TranslatorAgent(AgentBase):
  )
  prompt_directory = prompt_dir or DEFAULT_PROMPT_DIR
  super().__init__(
- config=config, prompt_dir=prompt_directory, default_model=default_model
+ configuration=configuration,
+ prompt_dir=prompt_directory,
+ default_model=default_model,
  )

  async def run_agent(
openai_sdk_helpers/agent/{validation.py → validator.py} CHANGED
@@ -5,10 +5,10 @@ from __future__ import annotations
  from pathlib import Path
  from typing import Any, Dict, Optional

+ from ..environment import DEFAULT_PROMPT_DIR
  from ..structure.validation import ValidationResultStructure
  from .base import AgentBase
- from .config import AgentConfiguration
- from .prompt_utils import DEFAULT_PROMPT_DIR
+ from .configuration import AgentConfiguration


  class ValidatorAgent(AgentBase):
@@ -81,7 +81,7 @@ class ValidatorAgent(AgentBase):
  --------
  >>> validator = ValidatorAgent(default_model="gpt-4o-mini")
  """
- config = AgentConfiguration(
+ configuration = AgentConfiguration(
  name="validator",
  instructions="Agent instructions",
  description="Validate user input and agent output against guardrails.",
@@ -89,7 +89,9 @@ class ValidatorAgent(AgentBase):
  )
  prompt_directory = prompt_dir or DEFAULT_PROMPT_DIR
  super().__init__(
- config=config, prompt_dir=prompt_directory, default_model=default_model
+ configuration=configuration,
+ prompt_dir=prompt_directory,
+ default_model=default_model,
  )

  async def run_agent(
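The summarizer, translator, and validator constructors above all switch the keyword passed to AgentBase.__init__ from config= to configuration=. A hypothetical custom subclass would follow the same pattern; this is a sketch only (EchoAgent, its name, and its strings are illustrative, and AgentBase may require further overrides not shown here):

from pathlib import Path
from typing import Optional

from openai_sdk_helpers.agent.base import AgentBase
from openai_sdk_helpers.agent.configuration import AgentConfiguration
from openai_sdk_helpers.environment import DEFAULT_PROMPT_DIR


class EchoAgent(AgentBase):
    """Illustrative subclass mirroring the constructors shown in this diff."""

    def __init__(
        self, prompt_dir: Optional[Path] = None, default_model: Optional[str] = None
    ) -> None:
        configuration = AgentConfiguration(
            name="echo",
            instructions="Agent instructions",
            description="Repeat the user's input back verbatim.",
        )
        prompt_directory = prompt_dir or DEFAULT_PROMPT_DIR
        super().__init__(
            configuration=configuration,  # was ``config=`` in 0.4.2
            prompt_dir=prompt_directory,
            default_model=default_model,
        )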
openai_sdk_helpers/cli.py CHANGED
@@ -18,17 +18,8 @@ registry inspect
  from __future__ import annotations

  import argparse
- import json
  import sys
  from pathlib import Path
- from typing import Any
-
- try:
- import openai_sdk_helpers
-
- __version__ = getattr(openai_sdk_helpers, "__version__", "unknown")
- except ImportError:
- __version__ = "unknown"


  def cmd_agent_test(args: argparse.Namespace) -> int:
@@ -159,8 +150,8 @@ def cmd_registry_list(args: argparse.Namespace) -> int:

  print("Registered configurations:")
  for name in sorted(names):
- config = registry.get(name)
- tools_count = len(config.tools) if config.tools else 0
+ configuration = registry.get(name)
+ tools_count = len(configuration.tools) if configuration.tools else 0
  print(f" - {name} ({tools_count} tools)")

  return 0
@@ -199,7 +190,7 @@ def cmd_registry_inspect(args: argparse.Namespace) -> int:
  registry = get_default_registry()

  try:
- config = registry.get(args.config_name)
+ configuration = registry.get(args.config_name)
  except KeyError:
  print(f"Error: Configuration '{args.config_name}' not found", file=sys.stderr)
  print("\nAvailable configurations:")
@@ -207,17 +198,17 @@ def cmd_registry_inspect(args: argparse.Namespace) -> int:
  print(f" - {name}")
  return 1

- print(f"Configuration: {config.name}")
- instructions_str = str(config.instructions)
+ print(f"Configuration: {configuration.name}")
+ instructions_str = str(configuration.instructions)
  instructions_preview = (
  instructions_str[:100] if len(instructions_str) > 100 else instructions_str
  )
  print(f"Instructions: {instructions_preview}...")
- print(f"Tools: {len(config.tools) if config.tools else 0}")
+ print(f"Tools: {len(configuration.tools) if configuration.tools else 0}")

- if config.tools:
+ if configuration.tools:
  print("\nTool names:")
- for tool in config.tools:
+ for tool in configuration.tools:
  tool_name = tool.get("function", {}).get("name", "unknown")
  print(f" - {tool_name}")

@@ -245,11 +236,6 @@ def main(argv: list[str] | None = None) -> int:
  prog="openai-helpers",
  description="OpenAI SDK Helpers CLI",
  )
- parser.add_argument(
- "--version",
- action="version",
- version=f"openai-sdk-helpers {__version__}",
- )


  subparsers = parser.add_subparsers(dest="command", help="Commands")
openai_sdk_helpers/environment.py CHANGED
@@ -18,8 +18,6 @@ get_data_path(name)

  from __future__ import annotations

- import os
- import os
  from pathlib import Path
  from dotenv import load_dotenv

@@ -61,18 +59,15 @@ def get_data_path(name: str) -> Path:
  return ensure_directory(path)


- def get_model() -> str:
- """Return the default model identifier.
+ def get_package_path() -> Path:
+ """Return the root path of the openai-sdk-helpers package.

  Returns
  -------
- str
- Default OpenAI model identifier.
-
- Examples
- --------
- >>> from openai_sdk_helpers.environment import _get_default_model
- >>> _get_default_model()
- 'gpt-4o-mini'
+ Path
+ Root directory path of the openai-sdk-helpers package.
  """
- return os.getenv("DEFAULT_MODEL", DEFAULT_MODEL)
+ return Path(__file__).parent
+
+
+ DEFAULT_PROMPT_DIR = get_package_path() / "prompt"
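environment.py drops the environment-variable based get_model() helper and instead exposes the package root plus a shared prompt directory. A small sketch of what the new names resolve to, assuming the wheel is installed normally:

from openai_sdk_helpers.environment import DEFAULT_PROMPT_DIR, get_package_path

# DEFAULT_PROMPT_DIR lives inside the installed package, next to environment.py.
assert DEFAULT_PROMPT_DIR == get_package_path() / "prompt"
print(sorted(p.name for p in DEFAULT_PROMPT_DIR.glob("*.jinja")))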
openai_sdk_helpers/prompt/vector_planner.jinja ADDED
@@ -0,0 +1,7 @@
+ You are a vector search planner.
+
+ Instructions:
+ - Break the user query into 1-5 focused vector search queries.
+ - Prefer short, keyword-rich queries optimized for semantic retrieval.
+ - Avoid web-search phrasing like "site:" or "latest news."
+ - Provide a clear reason for each query.
openai_sdk_helpers/prompt/vector_search.jinja ADDED
@@ -0,0 +1,6 @@
+ You are a vector search executor.
+
+ Instructions:
+ - Use the provided search plan to retrieve relevant chunks from the vector store.
+ - Return only the retrieved text without adding external context.
+ - Do not perform web searches or fabricate sources.
openai_sdk_helpers/prompt/vector_writer.jinja ADDED
@@ -0,0 +1,7 @@
+ You are a vector search report writer.
+
+ Instructions:
+ - Use only the provided vector search results.
+ - Summarize findings without referencing the public web.
+ - If sources are requested, describe them as vector store entries (e.g., file names or "vector store chunk").
+ - Keep the report focused on information grounded in the retrieved texts.
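The three templates above are plain instruction text shipped in the packaged prompt directory. How they could be loaded outside the helpers with stock Jinja2 is sketched below; the package's own template-loading code is not part of this diff, so this is illustrative only:

from jinja2 import Environment, FileSystemLoader

from openai_sdk_helpers.environment import DEFAULT_PROMPT_DIR

env = Environment(loader=FileSystemLoader(str(DEFAULT_PROMPT_DIR)))
for name in ("vector_planner.jinja", "vector_search.jinja", "vector_writer.jinja"):
    # No template variables appear in the diff, so render() is called with no arguments.
    print(env.get_template(name).render())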
openai_sdk_helpers/response/__init__.py CHANGED
@@ -35,7 +35,7 @@ process_files
  from __future__ import annotations

  from .base import ResponseBase
- from .config import ResponseConfiguration, ResponseRegistry, get_default_registry
+ from .configuration import ResponseConfiguration, ResponseRegistry, get_default_registry
  from .files import process_files
  from .messages import ResponseMessage, ResponseMessages
  from .runner import run_async, run_streamed, run_sync
openai_sdk_helpers/response/base.py CHANGED
@@ -43,7 +43,7 @@ from openai.types.responses.response_input_text_param import ResponseInputTextPa
  from openai.types.responses.response_output_message import ResponseOutputMessage

  from .messages import ResponseMessage, ResponseMessages
- from ..config import OpenAISettings
+ from ..settings import OpenAISettings
  from ..structure import StructureBase
  from ..types import OpenAIClient
  from ..utils import (
@@ -55,7 +55,7 @@ from ..utils import (
  )

  if TYPE_CHECKING: # pragma: no cover - only for typing hints
- from openai_sdk_helpers.streamlit_app.config import StreamlitAppConfig
+ from openai_sdk_helpers.streamlit_app.configuration import StreamlitAppConfig

  T = TypeVar("T", bound=StructureBase)
  ToolHandler = Callable[[ResponseFunctionToolCall], str | Any]
@@ -814,14 +814,14 @@ class ResponseBase(Generic[T]):

  Examples
  --------
- >>> config = MyResponse.build_streamlit_config(
+ >>> configuration = MyResponse.build_streamlit_config(
  ... display_title="My Assistant",
  ... description="A helpful AI assistant",
  ... system_vector_store=["docs", "kb"],
  ... model="gpt-4"
  ... )
  """
- from openai_sdk_helpers.streamlit_app.config import StreamlitAppConfig
+ from openai_sdk_helpers.streamlit_app.configuration import StreamlitAppConfig

  normalized_stores = None
  if system_vector_store is not None:
openai_sdk_helpers/response/{config.py → configuration.py} CHANGED
@@ -6,9 +6,9 @@ from dataclasses import dataclass
  from pathlib import Path
  from typing import Generic, Optional, Sequence, Type, TypeVar

- from ..config import OpenAISettings
+ from ..settings import OpenAISettings
  from ..structure.base import StructureBase
- from ..response.base import ResponseBase, ToolHandler
+ from .base import ResponseBase, ToolHandler
  from ..utils.json.data_class import DataclassJSONSerializable
  from ..utils.registry import RegistryBase
  from ..utils.instructions import resolve_instructions_from_path
@@ -26,14 +26,14 @@ class ResponseRegistry(RegistryBase["ResponseConfiguration"]):
  Examples
  --------
  >>> registry = ResponseRegistry()
- >>> config = ResponseConfiguration(
+ >>> configuration = ResponseConfiguration(
  ... name="test",
  ... instructions="Test instructions",
  ... tools=None,
  ... input_structure=None,
  ... output_structure=None
  ... )
- >>> registry.register(config)
+ >>> registry.register(configuration)
  >>> retrieved = registry.get("test")
  >>> retrieved.name
  'test'
@@ -53,8 +53,8 @@ def get_default_registry() -> ResponseRegistry:
  Examples
  --------
  >>> registry = get_default_registry()
- >>> config = ResponseConfiguration(...)
- >>> registry.register(config)
+ >>> configuration = ResponseConfiguration(...)
+ >>> registry.register(configuration)
  """
  return _default_registry

@@ -119,13 +119,13 @@ class ResponseConfiguration(DataclassJSONSerializable, Generic[TIn, TOut]):

  Examples
  --------
- >>> config = Configuration(
+ >>> configuration = Configuration(
  ... name="targeting_to_plan",
  ... tools=None,
  ... input_structure=PromptStructure,
  ... output_structure=WebSearchStructure,
  ... )
- >>> config.name
+ >>> configuration.name
  'prompt_to_websearch'
  """

openai_sdk_helpers/response/planner.py CHANGED
@@ -1,7 +1,7 @@
  """Planner response configuration."""

  from ..structure.plan.plan import PlanStructure
- from .config import ResponseConfiguration
+ from .configuration import ResponseConfiguration

  PLANNER = ResponseConfiguration(
  name="planner",
openai_sdk_helpers/response/prompter.py CHANGED
@@ -1,6 +1,6 @@
  """Prompter response configuration."""

- from .config import ResponseConfiguration
+ from .configuration import ResponseConfiguration
  from ..structure.prompt import PromptStructure

  PROMPTER = ResponseConfiguration(
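The planner and prompter configurations above now import ResponseConfiguration from the renamed configuration module. Registering a custom configuration works the same way; a sketch using the new path, with arguments mirroring the docstring example earlier in this diff (the name "example" and its instructions are illustrative):

from openai_sdk_helpers.response.configuration import (
    ResponseConfiguration,
    get_default_registry,
)

configuration = ResponseConfiguration(
    name="example",
    instructions="Answer briefly.",
    tools=None,
    input_structure=None,
    output_structure=None,
)

registry = get_default_registry()
registry.register(configuration)
assert registry.get("example").name == "example"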
openai_sdk_helpers/streamlit_app/__init__.py CHANGED
@@ -17,7 +17,7 @@ _load_configuration
  Load configuration with user-friendly error handling for Streamlit UI.
  """

- from .config import (
+ from .configuration import (
  StreamlitAppConfig,
  StreamlitAppRegistry,
  _load_configuration,
openai_sdk_helpers/streamlit_app/app.py CHANGED
@@ -28,7 +28,6 @@ from openai_sdk_helpers.utils import (
  coerce_jsonable,
  customJSONEncoder,
  ensure_list,
- log,
  )

  # Supported file extensions for OpenAI Assistants file search and vision
@@ -226,7 +225,7 @@ def _build_raw_output(result: Any, response: ResponseBase[Any]) -> dict[str, Any
  }


- def _get_response_instance(config: StreamlitAppConfig) -> ResponseBase[Any]:
+ def _get_response_instance(configuration: StreamlitAppConfig) -> ResponseBase[Any]:
  """Instantiate and cache the configured ResponseBase.

  Creates a new response instance from the configuration if not already
@@ -235,7 +234,7 @@ def _get_response_instance(config: StreamlitAppConfig) -> ResponseBase[Any]:

  Parameters
  ----------
- config : StreamlitAppConfig
+ configuration : StreamlitAppConfig
  Loaded configuration with response handler definition.

  Returns
@@ -258,13 +257,13 @@ def _get_response_instance(config: StreamlitAppConfig) -> ResponseBase[Any]:
  if isinstance(cached, ResponseBase):
  return cached

- response = config.create_response()
+ response = configuration.create_response()

- if config.preserve_vector_stores:
+ if configuration.preserve_vector_stores:
  setattr(response, "_cleanup_system_vector_storage", False)
  setattr(response, "_cleanup_user_vector_storage", False)

- vector_stores = config.normalized_vector_stores()
+ vector_stores = configuration.normalized_vector_stores()
  if vector_stores:
  attach_vector_store(response=response, vector_stores=vector_stores)

@@ -357,7 +356,7 @@ def _render_chat_history() -> None:

  def _handle_user_message(
  prompt: str,
- config: StreamlitAppConfig,
+ configuration: StreamlitAppConfig,
  attachment_paths: list[str] | None = None,
  attachment_names: list[str] | None = None,
  ) -> None:
@@ -371,7 +370,7 @@
  ----------
  prompt : str
  User-entered text to send to the assistant.
- config : StreamlitAppConfig
+ configuration : StreamlitAppConfig
  Loaded configuration with response handler definition.
  attachment_paths : list[str] or None, default None
  Optional list of file paths to attach to the message.
@@ -395,7 +394,7 @@
  {"role": "user", "content": prompt, "attachments": display_names}
  )
  try:
- response = _get_response_instance(config)
+ response = _get_response_instance(configuration)
  except Exception as exc: # pragma: no cover - surfaced in UI
  st.error(f"Failed to start response session: {exc}")
  return
@@ -442,15 +441,15 @@ def main(config_path: Path) -> None:
  >>> from pathlib import Path
  >>> main(Path("./my_config.py"))
  """
- config = _load_configuration(config_path)
- st.set_page_config(page_title=config.display_title, layout="wide")
+ configuration = _load_configuration(config_path)
+ st.set_page_config(page_title=configuration.display_title, layout="wide")
  _init_session_state()

- st.title(config.display_title)
- if config.description:
- st.caption(config.description)
- if config.model:
- st.caption(f"Model: {config.model}")
+ st.title(configuration.display_title)
+ if configuration.description:
+ st.caption(configuration.description)
+ if configuration.model:
+ st.caption(f"Model: {configuration.model}")

  close_col, _ = st.columns([1, 5])
  with close_col:
@@ -514,7 +513,7 @@ def main(config_path: Path) -> None:
  st.session_state["attachment_names"] = []
  _handle_user_message(
  prompt,
- config,
+ configuration,
  attachment_paths or None,
  attachment_display_names or None,
  )