openai-sdk-helpers 0.4.1__py3-none-any.whl → 0.4.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 
+from .environment import get_data_path, get_model
 from .utils.async_utils import run_coroutine_thread_safe, run_coroutine_with_fallback
 from .context_manager import (
     AsyncManagedResource,
@@ -61,7 +62,7 @@ from .agent import (
     SummarizerAgent,
     TranslatorAgent,
     ValidatorAgent,
-    VectorSearch,
+    VectorAgentSearch,
     WebAgentSearch,
 )
 from .response import (
@@ -103,6 +104,9 @@ from .types import (
 )
 
 __all__ = [
+    # Environment utilities
+    "get_data_path",
+    "get_model",
     # Async utilities
     "run_coroutine_thread_safe",
     "run_coroutine_with_fallback",
@@ -156,7 +160,7 @@ __all__ = [
     "SummarizerAgent",
    "TranslatorAgent",
     "ValidatorAgent",
-    "VectorSearch",
+    "VectorAgentSearch",
     "WebAgentSearch",
     "ExtendedSummaryStructure",
     "WebSearchStructure",
@@ -12,7 +12,7 @@ from .summarizer import SummarizerAgent
 from .translator import TranslatorAgent
 from .validation import ValidatorAgent
 from .utils import run_coroutine_agent_sync
-from .search.vector import VectorSearch
+from .search.vector import VectorAgentSearch
 from .search.web import WebAgentSearch
 
 __all__ = [
@@ -32,6 +32,6 @@ __all__ = [
     "SummarizerAgent",
     "TranslatorAgent",
     "ValidatorAgent",
-    "VectorSearch",
+    "VectorAgentSearch",
     "WebAgentSearch",
 ]
@@ -2,8 +2,9 @@
 
 from __future__ import annotations
 
+import json
 from pathlib import Path
-from typing import Any, Callable, Dict, Optional, Protocol, cast
+from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Protocol, cast
 import uuid
 
 from agents import (
@@ -21,14 +22,21 @@ from jinja2 import Template
 
 from ..utils.json.data_class import DataclassJSONSerializable
 from ..structure.base import StructureBase
+from ..structure.prompt import PromptStructure
 
 from ..utils import (
     check_filepath,
     log,
 )
 
+from ..tools import tool_handler_factory
+
 from .runner import run_async, run_streamed, run_sync
 
+if TYPE_CHECKING:
+    from ..config import OpenAISettings
+    from ..response.base import ResponseBase, ToolHandler
+
 
 class AgentConfigurationProtocol(Protocol):
     """Protocol describing the configuration attributes for AgentBase."""
@@ -181,6 +189,10 @@ class AgentBase(DataclassJSONSerializable):
         Return a streaming result for the agent execution.
     as_tool()
         Return the agent as a callable tool.
+    as_response_tool()
+        Return response tool handler and definition for Responses API use.
+    build_response(openai_settings, data_path=None, tool_handlers=None, system_vector_store=None)
+        Build a ResponseBase instance based on this agent.
     close()
         Clean up agent resources (can be overridden by subclasses).
     """
@@ -575,26 +587,156 @@ class AgentBase(DataclassJSONSerializable):
         )
         return tool_obj
 
-    def as_response_tool(self) -> tuple[dict[str, Callable[..., Any]], dict[str, Any]]:
-        """Return the agent as a callable response tool.
+    def as_response_tool(
+        self,
+        *,
+        tool_name: str | None = None,
+        tool_description: str | None = None,
+    ) -> tuple[dict[str, Callable[..., Any]], dict[str, Any]]:
+        """Return response tool handler and definition for Responses API use.
+
+        The returned handler serializes tool output as JSON using
+        ``tool_handler_factory`` so downstream response flows can rely on a
+        consistent payload format.
+
+        Parameters
+        ----------
+        tool_name : str or None, default=None
+            Optional override for the tool name. When None, uses the agent name.
+        tool_description : str or None, default=None
+            Optional override for the tool description. When None, uses the
+            agent description.
 
         Returns
         -------
-        Tool
-            Tool instance wrapping this agent for response generation.
+        tuple[dict[str, Callable[..., Any]], dict[str, Any]]
+            Tool handler mapping and tool definition for Responses API usage.
+
+        Examples
+        --------
+        >>> tool_handler, tool_definition = agent.as_response_tool()
+        >>> response = ResponseBase(
+        ...     name="agent_tool",
+        ...     instructions="Use the agent tool when needed.",
+        ...     tools=[tool_definition],
+        ...     output_structure=None,
+        ...     tool_handlers=tool_handler,
+        ...     openai_settings=settings,
+        ... )
+        >>> response.run_sync("Invoke the agent tool")  # doctest: +SKIP
         """
-        tool_handler = {self.name: self.run_sync}
+
+        def _run_agent(**kwargs: Any) -> Any:
+            prompt = kwargs.get("prompt")
+            if prompt is None:
+                if len(kwargs) == 1:
+                    prompt = next(iter(kwargs.values()))
+                else:
+                    prompt = json.dumps(kwargs)
+            return self.run_sync(str(prompt))
+
+        name = tool_name or self.name
+        description = tool_description or self.description
+        input_model = self._input_structure or PromptStructure
+        tool_handler = {name: tool_handler_factory(_run_agent, input_model=input_model)}
         tool_definition = {
             "type": "function",
-            "name": self.name,
-            "description": self.description,
+            "name": name,
+            "description": description,
             "strict": True,
             "additionalProperties": False,
+            "parameters": self._build_response_parameters(),
         }
-        if self.output_structure:
-            tool_definition["parameters"] = self.output_structure.get_schema()
         return tool_handler, tool_definition
 
+    def build_response(
+        self,
+        *,
+        openai_settings: OpenAISettings,
+        data_path: Path | str | None = None,
+        tool_handlers: dict[str, ToolHandler] | None = None,
+        system_vector_store: list[str] | None = None,
+    ) -> ResponseBase[StructureBase]:
+        """Build a ResponseBase instance from this agent configuration.
+
+        Parameters
+        ----------
+        openai_settings : OpenAISettings
+            Authentication and model settings applied to the generated response.
+        data_path : Path, str, or None, default None
+            Optional path for storing response artifacts. When None, the
+            response uses the default data directory.
+        tool_handlers : dict[str, ToolHandler] or None, default None
+            Optional mapping of tool names to handler callables.
+        system_vector_store : list[str] or None, default None
+            Optional list of vector store names to attach as system context.
+
+        Returns
+        -------
+        ResponseBase[StructureBase]
+            ResponseBase instance configured with this agent's settings.
+
+        Examples
+        --------
+        >>> from openai_sdk_helpers import OpenAISettings
+        >>> response = agent.build_response(openai_settings=OpenAISettings.from_env())
+        """
+        from ..response.base import ResponseBase, ToolHandler
+        from ..config import OpenAISettings
+
+        if not isinstance(openai_settings, OpenAISettings):
+            raise TypeError("openai_settings must be an OpenAISettings instance")
+
+        tools = self._normalize_response_tools(self.tools)
+
+        return ResponseBase(
+            name=self.name,
+            instructions=self.instructions_text,
+            tools=tools,
+            output_structure=self.output_structure,
+            system_vector_store=system_vector_store,
+            data_path=data_path,
+            tool_handlers=tool_handlers,
+            openai_settings=openai_settings,
+        )
+
+    def _build_response_parameters(self) -> dict[str, Any]:
+        """Build the Responses API parameter schema for this agent tool.
+
+        Returns
+        -------
+        dict[str, Any]
+            JSON schema describing tool input parameters.
+        """
+        if self._input_structure is not None:
+            return self._input_structure.get_schema()
+        return {
+            "type": "object",
+            "properties": {
+                "prompt": {"type": "string", "description": "Prompt text to run."}
+            },
+            "required": ["prompt"],
+            "additionalProperties": False,
+        }
+
+    @staticmethod
+    def _normalize_response_tools(tools: Optional[list]) -> Optional[list]:
+        """Normalize tool definitions for the Responses API."""
+        if not tools:
+            return tools
+
+        normalized: list[Any] = []
+        for tool in tools:
+            if hasattr(tool, "to_dict") and callable(tool.to_dict):
+                normalized.append(tool.to_dict())
+            elif hasattr(tool, "to_openai_tool") and callable(tool.to_openai_tool):
+                normalized.append(tool.to_openai_tool())
+            elif hasattr(tool, "schema"):
+                normalized.append(tool.schema)
+            else:
+                normalized.append(tool)
+        return normalized
+
     def __enter__(self) -> AgentBase:
         """Enter the context manager for resource management.
 
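The new `as_response_tool()` and `build_response()` methods let an existing agent be reused in Responses API flows: the former wraps the agent's `run_sync` behind a JSON-serializing handler plus a function-tool definition, the latter mirrors the agent's settings into a `ResponseBase`. A minimal sketch, assuming `agent` is an already-constructed `AgentBase` subclass and the tool name is illustrative:

    from openai_sdk_helpers import OpenAISettings

    settings = OpenAISettings.from_env()

    # Wrap the agent as a Responses API tool; the handler serializes output as
    # JSON via tool_handler_factory and the definition carries the parameter
    # schema built by _build_response_parameters().
    tool_handler, tool_definition = agent.as_response_tool(tool_name="my_agent")

    # Or promote the whole agent configuration into a ResponseBase directly.
    response = agent.build_response(openai_settings=settings)
    result = response.run_sync("Summarize the latest report")
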
@@ -187,9 +187,8 @@ class AgentConfiguration(DataclassJSONSerializable):
     input_guardrails: Optional[list[InputGuardrail]] = None
     output_guardrails: Optional[list[OutputGuardrail]] = None
     session: Optional[Session] = None
-    _instructions_cache: Optional[str] = field(
-        default=None, init=False, repr=False, compare=False
-    )
+    add_output_instructions: bool = False
+    add_web_search_tool: bool = False
 
     def __post_init__(self) -> None:
         """Validate configuration invariants after initialization.
@@ -258,11 +257,16 @@ class AgentConfiguration(DataclassJSONSerializable):
         str
             Plain-text instructions, loading template files when necessary.
         """
-        cached = self._instructions_cache
-        if cached is None:
-            cached = self._resolve_instructions()
-            object.__setattr__(self, "_instructions_cache", cached)
-        return cached
+        resolved_instructions: str = resolve_instructions_from_path(self.instructions)
+        output_instructions = ""
+        if self.output_structure is not None and self.add_output_instructions:
+            output_instructions = self.output_structure.get_prompt(
+                add_enum_values=False
+            )
+        if output_instructions:
+            return f"{resolved_instructions}\n{output_instructions}"
+
+        return resolved_instructions
 
     def _resolve_instructions(self) -> str:
         """Resolve instructions from string or file path."""
@@ -361,66 +365,37 @@
 
         return replace(self, **changes)
 
-    def to_json(self) -> dict[str, Any]:
-        """Return a JSON-compatible dict representation.
-
-        Returns
-        -------
-        dict[str, Any]
-            Serialized configuration data without cached fields.
-        """
-        data = DataclassJSONSerializable.to_json(self)
-        data.pop("_instructions_cache", None)
-        return data
-
-    @classmethod
-    def from_json(cls, data: dict[str, Any]) -> AgentConfiguration:
-        """Create an AgentConfiguration from JSON data.
+    def to_response_config(self) -> Any:
+        """Convert this AgentConfiguration to a ResponseConfiguration.
 
-        Overrides the default JSONSerializable.from_json to properly handle
-        the instructions field, converting string paths that look like file
-        paths back to Path objects for proper file loading.
-
-        Parameters
-        ----------
-        data : dict[str, Any]
-            Dictionary containing the configuration data.
+        This is a convenience method for creating a ResponseConfiguration
+        instance using the relevant fields from this agent configuration.
 
         Returns
         -------
-        AgentConfiguration
-            New configuration instance.
-
-        Notes
-        -----
-        This method attempts to preserve the original type of the instructions
-        field. If instructions is a string that represents an existing file path,
-        it will be converted to a Path object to ensure proper file loading
-        behavior is maintained across JSON round-trips.
+        ResponseConfiguration
+            New response configuration instance.
+
+        Examples
+        --------
+        >>> agent_config = AgentConfiguration(
+        ...     name="responder",
+        ...     model="gpt-4o-mini",
+        ...     instructions="Respond to user queries"
+        ... )
+        >>> response_config = agent_config.to_response_config()
+        >>> response_config.name
+        'responder'
         """
-        # Make a copy to avoid modifying the input
-        data = data.copy()
-        data.pop("_instructions_cache", None)
-
-        # Handle instructions field: if it's a string path to an existing file,
-        # convert it back to Path for proper file loading
-        if "instructions" in data and data["instructions"] is not None:
-            instructions_value = data["instructions"]
-            if isinstance(instructions_value, str):
-                # Check if it looks like a file path and the file exists
-                # This preserves the intended behavior for file-based instructions
-                try:
-                    potential_path = Path(instructions_value)
-                    # Only convert to Path if it's an existing file
-                    # This way, plain text instructions stay as strings
-                    if potential_path.exists() and potential_path.is_file():
-                        data["instructions"] = potential_path
-                except (OSError, ValueError):
-                    # If path parsing fails, keep it as a string (likely plain text)
-                    pass
-
-        # Use the parent class method for the rest
-        return super(AgentConfiguration, cls).from_json(data)
+        from ..response.config import ResponseConfiguration
+
+        return ResponseConfiguration(
+            name=self.name,
+            instructions=self.instructions,
+            input_structure=self.input_structure,
+            output_structure=self.output_structure,
+            tools=self.tools,
+        )
 
 
 # Global default registry instance
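
`to_response_config()` replaces the removed JSON round-trip helpers: instead of caching and re-hydrating instructions, the configuration now resolves them on access and can be mirrored into a `ResponseConfiguration`. A short sketch based on the docstring example above; the field values are illustrative:

    from openai_sdk_helpers.agent.config import AgentConfiguration

    agent_config = AgentConfiguration(
        name="responder",
        model="gpt-4o-mini",
        instructions="Respond to user queries",
    )
    response_config = agent_config.to_response_config()
    assert response_config.name == "responder"
    # name, instructions, input_structure, output_structure and tools are
    # copied onto the ResponseConfiguration.
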
@@ -10,10 +10,10 @@ from .web import (
 )
 from .vector import (
     MAX_CONCURRENT_SEARCHES as VECTOR_MAX_CONCURRENT_SEARCHES,
-    VectorSearchPlanner,
+    VectorAgentPlanner,
     VectorSearchTool,
     VectorSearchWriter,
-    VectorSearch,
+    VectorAgentSearch,
 )
 
 __all__ = [
@@ -26,8 +26,8 @@ __all__ = [
     "WebAgentWriter",
     "WebAgentSearch",
     "VECTOR_MAX_CONCURRENT_SEARCHES",
-    "VectorSearchPlanner",
+    "VectorAgentPlanner",
     "VectorSearchTool",
     "VectorSearchWriter",
-    "VectorSearch",
+    "VectorAgentSearch",
 ]
@@ -7,6 +7,7 @@ from typing import Any, Callable, Dict, List, Optional
 
 from agents import custom_span, gen_trace_id, trace
 
+from ...structure.prompt import PromptStructure
 from ...structure.vector_search import (
     VectorSearchItemStructure,
     VectorSearchItemResultStructure,
@@ -15,6 +16,7 @@ from ...structure.vector_search import (
     VectorSearchPlanStructure,
     VectorSearchReportStructure,
 )
+from ...tools import tool_handler_factory
 from ...vector_storage import VectorStorage
 from ..config import AgentConfiguration
 from ..utils import run_coroutine_agent_sync
@@ -23,7 +25,7 @@ from .base import SearchPlanner, SearchToolAgent, SearchWriter
 MAX_CONCURRENT_SEARCHES = 10
 
 
-class VectorSearchPlanner(SearchPlanner[VectorSearchPlanStructure]):
+class VectorAgentPlanner(SearchPlanner[VectorSearchPlanStructure]):
     """Plan vector searches to satisfy a user query.
 
     Parameters
@@ -241,7 +243,7 @@ class VectorSearchWriter(SearchWriter[VectorSearchReportStructure]):
         )
 
 
-class VectorSearch:
+class VectorAgentSearch:
     """Manage the complete vector search workflow.
 
     This high-level agent orchestrates a multi-step research process that plans
@@ -292,6 +294,8 @@ class VectorSearch:
         Execute the research workflow asynchronously.
     run_agent_sync(search_query)
         Execute the research workflow synchronously.
+    as_response_tool(vector_store_name, tool_name, tool_description)
+        Build a Responses API tool definition and handler.
     run_vector_agent(search_query)
         Convenience asynchronous entry point for the workflow.
     run_vector_agent_sync(search_query)
@@ -306,9 +310,9 @@ class VectorSearch:
     def __init__(
         self,
         *,
+        vector_store_name: str,
         prompt_dir: Optional[Path] = None,
         default_model: Optional[str] = None,
-        vector_store_name: Optional[str] = None,
         max_concurrent_searches: int = MAX_CONCURRENT_SEARCHES,
         vector_storage: Optional[VectorStorage] = None,
         vector_storage_factory: Optional[Callable[[str], VectorStorage]] = None,
@@ -336,7 +340,7 @@ class VectorSearch:
         """
         trace_id = gen_trace_id()
         with trace("VectorSearch trace", trace_id=trace_id):
-            planner = VectorSearchPlanner(
+            planner = VectorAgentPlanner(
                 prompt_dir=self._prompt_dir, default_model=self._default_model
             )
             tool = VectorSearchTool(
@@ -383,45 +387,53 @@ class VectorSearch:
         """
         return run_coroutine_agent_sync(self.run_agent(search_query))
 
-    @staticmethod
-    async def run_vector_agent(search_query: str) -> VectorSearchStructure:
-        """Return a research report for the given query using ``VectorSearch``.
+    def as_response_tool(
+        self,
+        *,
+        tool_name: str = "vector_search",
+        tool_description: str = "Run the vector search workflow.",
+    ) -> tuple[dict[str, Callable[..., Any]], dict[str, Any]]:
+        """Return a Responses API tool handler and definition.
 
         Parameters
         ----------
-        search_query : str
-            User's research query.
+        vector_store_name : str
+            Name of the vector store to use for the response tool.
+        tool_name : str, default="vector_search"
+            Name to use for the response tool.
+        tool_description : str, default="Run the vector search workflow."
+            Description for the response tool.
 
         Returns
        -------
-        VectorSearchStructure
-            Completed research output.
+        tuple[dict[str, Callable[..., Any]], dict[str, Any]]
+            Tool handler mapping and tool definition for Responses API usage.
         """
-        return await VectorSearch().run_agent(search_query=search_query)
+        search = VectorAgentSearch(
+            prompt_dir=self._prompt_dir,
+            default_model=self._default_model,
+            vector_store_name=self._vector_store_name,
+            max_concurrent_searches=self._max_concurrent_searches,
+            vector_storage=self._vector_storage,
+            vector_storage_factory=self._vector_storage_factory,
+        )
 
-    @staticmethod
-    def run_vector_agent_sync(search_query: str) -> VectorSearchStructure:
-        """Run :meth:`run_vector_agent` synchronously for ``search_query``.
+        def _run_search(prompt: str) -> VectorSearchStructure:
+            return run_coroutine_agent_sync(search.run_agent(search_query=prompt))
 
-        Parameters
-        ----------
-        search_query : str
-            User's research query.
-
-        Returns
-        -------
-        VectorSearchStructure
-            Completed research output.
-        """
-        return run_coroutine_agent_sync(
-            VectorSearch.run_vector_agent(search_query=search_query)
+        tool_handler = {
+            tool_name: tool_handler_factory(_run_search, input_model=PromptStructure)
+        }
+        tool_definition = PromptStructure.response_tool_definition(
+            tool_name, tool_description=tool_description
         )
+        return tool_handler, tool_definition
 
 
 __all__ = [
     "MAX_CONCURRENT_SEARCHES",
-    "VectorSearchPlanner",
+    "VectorAgentPlanner",
     "VectorSearchTool",
     "VectorSearchWriter",
-    "VectorSearch",
+    "VectorAgentSearch",
 ]
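
`VectorAgentSearch.as_response_tool()` packages the whole vector-search workflow as a Responses API tool: the handler runs the workflow synchronously through `tool_handler_factory`, and the definition comes from `PromptStructure.response_tool_definition`. A minimal sketch, wired the same way as the `AgentBase.as_response_tool` docstring example earlier in this diff; the store name and `settings` object are illustrative:

    search = VectorAgentSearch(vector_store_name="project-notes")
    tool_handler, tool_definition = search.as_response_tool()

    response = ResponseBase(
        name="vector_tool",
        instructions="Use the vector_search tool to answer from stored documents.",
        tools=[tool_definition],
        output_structure=None,
        tool_handlers=tool_handler,
        openai_settings=settings,  # an OpenAISettings instance
    )
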
@@ -3,12 +3,13 @@
 from __future__ import annotations
 
 from pathlib import Path
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Callable, Dict, List, Optional, Union
 
 from agents import custom_span, gen_trace_id, trace
 from agents.model_settings import ModelSettings
 from agents.tool import WebSearchTool
 
+from ...structure.prompt import PromptStructure
 from ...structure.web_search import (
     WebSearchItemStructure,
     WebSearchItemResultStructure,
@@ -16,6 +17,7 @@ from ...structure.web_search import (
     WebSearchPlanStructure,
     WebSearchReportStructure,
 )
+from ...tools import tool_handler_factory
 from ..config import AgentConfiguration
 from ..utils import run_coroutine_agent_sync
 from .base import SearchPlanner, SearchToolAgent, SearchWriter
@@ -242,6 +244,8 @@ class WebAgentSearch:
         Execute the research workflow asynchronously.
     run_agent_sync(search_query)
         Execute the research workflow synchronously.
+    as_response_tool(tool_name, tool_description)
+        Build a Responses API tool definition and handler.
     run_web_agent_async(search_query)
         Convenience asynchronous entry point for the workflow.
     run_web_agent_sync(search_query)
@@ -301,7 +305,7 @@ class WebAgentSearch:
         )
 
     def run_agent_sync(self, search_query: str) -> WebSearchStructure:
-        """Run :meth:`run_agent_async` synchronously for ``search_query``.
+        """Execute the entire research workflow for ``search_query`` synchronously.
 
         Parameters
         ----------
@@ -312,41 +316,41 @@
         -------
         WebSearchStructure
             Completed research output.
+
         """
         return run_coroutine_agent_sync(self.run_agent_async(search_query))
 
-    async def run_web_agent_async(self, search_query: str) -> WebSearchStructure:
-        """Return a research report for the given query using ``WebAgentSearch``.
+    def as_response_tool(
+        self,
+        *,
+        tool_name: str = "web_search",
+        tool_description: str = "Run the web search workflow.",
+    ) -> tuple[dict[str, Callable[..., Any]], dict[str, Any]]:
+        """Return a Responses API tool handler and definition.
 
         Parameters
         ----------
-        search_query : str
-            User's research query.
+        tool_name : str, default="web_search"
+            Name to use for the response tool.
+        tool_description : str, default="Run the web search workflow."
+            Description for the response tool.
 
         Returns
         -------
-        WebSearchStructure
-            Completed research output.
+        tuple[dict[str, Callable[..., Any]], dict[str, Any]]
+            Tool handler mapping and tool definition for Responses API usage.
         """
-        return await self.run_agent_async(search_query=search_query)
 
-    @staticmethod
-    def run_web_agent_sync(search_query: str) -> WebSearchStructure:
-        """Run :meth:`run_web_agent_async` synchronously for ``search_query``.
+        def _run_search(prompt: str) -> WebSearchStructure:
+            return run_coroutine_agent_sync(self.run_agent_async(search_query=prompt))
 
-        Parameters
-        ----------
-        search_query : str
-            User's research query.
-
-        Returns
-        -------
-        WebSearchStructure
-            Completed research output.
-        """
-        return run_coroutine_agent_sync(
-            WebAgentSearch().run_web_agent_async(search_query=search_query)
+        tool_handler = {
+            tool_name: tool_handler_factory(_run_search, input_model=PromptStructure)
+        }
+        tool_definition = PromptStructure.response_tool_definition(
+            tool_name, tool_description=tool_description
        )
+        return tool_handler, tool_definition
 
 
 __all__ = [
@@ -18,10 +18,15 @@ get_data_path(name)
 
 from __future__ import annotations
 
+import os
+import os
 from pathlib import Path
+from dotenv import load_dotenv
+
 
 from openai_sdk_helpers.utils import ensure_directory
 
+load_dotenv()
 DATETIME_FMT = "%Y%m%d_%H%M%S"
 DEFAULT_MODEL = "gpt-4o-mini"
 
@@ -54,3 +59,20 @@ def get_data_path(name: str) -> Path:
     base = Path(__file__).parent.parent.parent / "data"
     path = base / name
     return ensure_directory(path)
+
+
+def get_model() -> str:
+    """Return the default model identifier.
+
+    Returns
+    -------
+    str
+        Default OpenAI model identifier.
+
+    Examples
+    --------
+    >>> from openai_sdk_helpers.environment import get_model
+    >>> get_model()
+    'gpt-4o-mini'
+    """
+    return os.getenv("DEFAULT_MODEL", DEFAULT_MODEL)
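
`get_model()` now resolves the default model from the environment: `load_dotenv()` runs at import time, so a `DEFAULT_MODEL` entry in a `.env` file or the process environment overrides the built-in `"gpt-4o-mini"` fallback. A small sketch; the override value is illustrative:

    import os
    from openai_sdk_helpers import get_model

    print(get_model())                      # 'gpt-4o-mini' unless DEFAULT_MODEL is set
    os.environ["DEFAULT_MODEL"] = "gpt-4o"  # illustrative override
    print(get_model())                      # 'gpt-4o'
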
@@ -135,7 +135,7 @@ class ResponseConfiguration(DataclassJSONSerializable, Generic[TIn, TOut]):
     input_structure: Optional[Type[TIn]]
     output_structure: Optional[Type[TOut]]
     system_vector_store: Optional[list[str]] = None
-    add_output_instructions: bool = True
+    add_output_instructions: bool = False
     add_web_search_tool: bool = False
 
     def __post_init__(self) -> None:
@@ -0,0 +1,12 @@
+"""Planner response configuration."""
+
+from ..structure.plan.plan import PlanStructure
+from .config import ResponseConfiguration
+
+PLANNER = ResponseConfiguration(
+    name="planner",
+    instructions="Generates structured prompts based on user input.",
+    tools=None,
+    input_structure=None,
+    output_structure=PlanStructure,
+)
@@ -0,0 +1,12 @@
+"""Prompter response configuration."""
+
+from .config import ResponseConfiguration
+from ..structure.prompt import PromptStructure
+
+PROMPTER = ResponseConfiguration(
+    name="prompter",
+    instructions="Generates structured prompts based on user input.",
+    tools=None,
+    input_structure=None,
+    output_structure=PromptStructure,
+)
@@ -46,7 +46,7 @@ class StreamlitWebSearch(ResponseBase[WebSearchStructure]):
        async def perform_search(tool) -> str:
             """Perform a web search and return structured results."""
             structured_data = PromptStructure.from_tool_arguments(tool.arguments)
-            web_result = await WebAgentSearch(default_model=DEFAULT_MODEL).run_web_agent_async(
+            web_result = await WebAgentSearch(default_model=DEFAULT_MODEL).run_agent_async(
                 structured_data.prompt
             )
             payload = coerce_jsonable(web_result)
@@ -32,6 +32,7 @@ def get_module_qualname(obj: Any) -> tuple[str, str] | None:
     ...     pass
     >>> get_module_qualname(MyClass)
     ('__main__', 'MyClass')
+
     """
     module = getattr(obj, "__module__", None)
     qualname = getattr(obj, "__qualname__", None)
@@ -59,6 +60,7 @@ def encode_module_qualname(obj: Any) -> dict[str, Any] | None:
     ...     pass
     >>> encode_module_qualname(MyClass)
     {'module': '__main__', 'qualname': 'MyClass'}
+
     """
     result = get_module_qualname(obj)
     if result is None:
@@ -85,6 +87,7 @@ def decode_module_qualname(ref: dict[str, Any]) -> Any | None:
     >>> ref = {'module': 'pathlib', 'qualname': 'Path'}
     >>> decode_module_qualname(ref)
     <class 'pathlib.Path'>
+
     """
     if not isinstance(ref, dict):
         return None
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openai-sdk-helpers
-Version: 0.4.1
+Version: 0.4.2
 Summary: Composable helpers for OpenAI SDK agents, prompts, and storage
 Author: openai-sdk-helpers maintainers
 License: MIT
@@ -1,9 +1,9 @@
-openai_sdk_helpers/__init__.py,sha256=ZvcdJWHP7oMRxSGemxgO7CIpF0dc6oBTh4oftZAYv3c,4640
+openai_sdk_helpers/__init__.py,sha256=19MEuQFgMqr6YqEc0QdmNe5YTg5JJVVCIisrYLFbqGk,4766
 openai_sdk_helpers/cli.py,sha256=YnQz-IcAqcBdh8eCCxVYa7NHDuHgHaU-PJ4FWPvkz58,8278
 openai_sdk_helpers/config.py,sha256=xK_u0YNKgtPrLrZrVr4F4k0CvSuYbsmkqqw9mCMdyF8,10932
 openai_sdk_helpers/context_manager.py,sha256=QqlrtenwKoz2krY0IzuToKdTX1HptUYtIEylxieybgY,6633
 openai_sdk_helpers/deprecation.py,sha256=VF0VDDegawYhsu5f-vE6dop9ob-jv8egxsm0KsPvP9E,4753
-openai_sdk_helpers/environment.py,sha256=PqH9viiGIgRAiwU8xPPWlE25KbgSBLmE6nj7S8izCV4,1491
+openai_sdk_helpers/environment.py,sha256=9SYGAgf6dp0aknDdvcnSD40vJWONZsVhO-i8Ayo3jpg,1906
 openai_sdk_helpers/errors.py,sha256=0TLrcpRXPBvk2KlrU5I1VAQl-sYy-d15h_SMDkEawvI,2757
 openai_sdk_helpers/files_api.py,sha256=uMKHvGg1Od0J95Izl3AG9ofQYq8EDJXEty7zP0oKjJM,12569
 openai_sdk_helpers/logging_config.py,sha256=JcR0FTWht1tYdwD-bXH835pr0JV0RwHfY3poruiZGHM,795
@@ -11,9 +11,9 @@ openai_sdk_helpers/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 openai_sdk_helpers/retry.py,sha256=J10oQYphfzDXm3BnLoXwxk7PAhN93TC2LQOv0VDGOwI,6533
 openai_sdk_helpers/tools.py,sha256=Awj5htt1ImBbNToM1u6qdrIZ-7MiPZAXZ_oKKiWivy8,10547
 openai_sdk_helpers/types.py,sha256=xzldCRfwCZ3rZl18IBmfgA-PVdoZKSWNrlSIhirumSo,1451
-openai_sdk_helpers/agent/__init__.py,sha256=vEI4LG7WJaP6WwCcO_5lE7CEOHOl2vUhY5Ai4El2Syk,1060
-openai_sdk_helpers/agent/base.py,sha256=BZ9EImXFeM9x_79T9tla8cD7CzYDstZBkv4mQkZ7XYk,21635
-openai_sdk_helpers/agent/config.py,sha256=tTw2f0VZmIl8MQrJzdsYFZ-daSrm9mpTdZ1RxFON9Yc,15848
+openai_sdk_helpers/agent/__init__.py,sha256=39nYVK8okZv_srC86HStwtKirkH1_FXkacoqfV73naA,1070
+openai_sdk_helpers/agent/base.py,sha256=iv14LURB5PFcbRHuP1lWyj8JbKvDqM1m1Tf1mOi6CA8,27080
+openai_sdk_helpers/agent/config.py,sha256=NlSE1T1T3fUOu4GJzwjmrVGxB1aI969yYg6sFXgwHCI,14611
 openai_sdk_helpers/agent/coordination.py,sha256=VTzyl4RV1q4ugiyFW4Fj7pOAVVO0bMRD63PfQRDwfoQ,18030
 openai_sdk_helpers/agent/prompt_utils.py,sha256=-1M66tqQxh9wWCFg6X-K7cCcqauca3yA04ZjvOpN3bA,337
 openai_sdk_helpers/agent/runner.py,sha256=aOVN1OYKK5_u7oFBqRCOOeTgcb-lLl4kZGxuPLmJrMw,4884
@@ -21,10 +21,10 @@ openai_sdk_helpers/agent/summarizer.py,sha256=lg_PLB1DSHox3PNDgiCzvCPM5VoCUbKEMG
 openai_sdk_helpers/agent/translator.py,sha256=3u7er1GhUGdy7OMa3A_vyqFFZfev3XBCZW_6w5OwYVc,6286
 openai_sdk_helpers/agent/utils.py,sha256=DTD5foCqGYfXf13F2bZMYIQROl7SbDSy5GDPGi0Zl-0,1089
 openai_sdk_helpers/agent/validation.py,sha256=6NHZIFaUOqRZeYqvRBnDc_uApAV3YHJnOhLHKbVUsi0,5094
-openai_sdk_helpers/agent/search/__init__.py,sha256=xqosfzH4HcBs9IFZks9msG_694rS5q6Ea4_qNeRQRmU,798
+openai_sdk_helpers/agent/search/__init__.py,sha256=LXXzEcX2MU7_htHRdRCGPw0hsr9CrZn0ESii7GZJMBw,806
 openai_sdk_helpers/agent/search/base.py,sha256=VokTw3-V2yxGzm2WzlcvU100h3UaeyGslCFwIgMvJwI,10146
-openai_sdk_helpers/agent/search/vector.py,sha256=i5nWUvEE9eAv41FwbMT52uORZ7OHCEc8d4F6qH-klCc,14282
-openai_sdk_helpers/agent/search/web.py,sha256=7rYvFAZ1S00IaFPcneEOP2yY2vKIvYdnJvAbVTRbESc,10767
+openai_sdk_helpers/agent/search/vector.py,sha256=A1HskDI6YVd3D9IQncowgiWUy9ptlMlSJhrBRDyqroM,15167
+openai_sdk_helpers/agent/search/web.py,sha256=EQ0Rgcz21Rm9bDGPr8XPlDj34_nH2wnB7ER9rBy48Ak,11199
 openai_sdk_helpers/enums/__init__.py,sha256=aFf79C4JBeLC3kMlJfSpehyjx5uNCtW6eK5rD6ZFfhM,322
 openai_sdk_helpers/enums/base.py,sha256=cNllDtzcgI0_eZYXxFko14yhxwicX6xbeDfz9gFE3qo,2753
 openai_sdk_helpers/prompt/__init__.py,sha256=MOqgKwG9KLqKudoKRlUfLxiSmdOi2aD6hNrWDFqLHkk,418
@@ -34,16 +34,18 @@ openai_sdk_helpers/prompt/translator.jinja,sha256=SZhW8ipEzM-9IA4wyS_r2wIMTAclWr
 openai_sdk_helpers/prompt/validator.jinja,sha256=6t8q_IdxFd3mVBGX6SFKNOert1Wo3YpTOji2SNEbbtE,547
 openai_sdk_helpers/response/__init__.py,sha256=Rh3tBygneOhS-Er_4dtX4Xa69ukvxYv01brq26VpgwQ,1886
 openai_sdk_helpers/response/base.py,sha256=OA1p9h6EIzwt8VCWFXEalaQHOe0_eZDefqs5jQKu-vU,34844
-openai_sdk_helpers/response/config.py,sha256=nVEf4KSlBmKHa9Yl78nNi-t60BIwkGqBC2bCRu_q_xQ,9304
+openai_sdk_helpers/response/config.py,sha256=ugZIP29krecf6JXiwkrc1nBDCdT_C9DSOCdPkLRN4wY,9305
 openai_sdk_helpers/response/files.py,sha256=6iHXeNZg4R08ilQ7D53qIJDQGYPpTLcByAhNJlEwbZ4,13226
 openai_sdk_helpers/response/messages.py,sha256=qX3sW79rLuJEys28zyv5MovZikwGOaLevzdVN0VYMRE,10104
+openai_sdk_helpers/response/planner.py,sha256=OfqrANheofY2155kVVfAWPPAHlnSnhaF0MLUHwNgPBU,333
+openai_sdk_helpers/response/prompter.py,sha256=vaHrNAAB9Z5WYwQeTKfOkAoH6DaFx1aRcywngqr47Pc,337
 openai_sdk_helpers/response/runner.py,sha256=Rg8XmxU5UwxJc3MjPlYlXWDimxy_cjxzefGiruNZK6s,4269
 openai_sdk_helpers/response/tool_call.py,sha256=c9Filh4IG5H_RWuJlYl6KUZDaF7mCjkabFRQMNiz7zM,7422
 openai_sdk_helpers/response/vector_store.py,sha256=cG5Mzdhjw5FsX1phgclIGz2MQ8f8uMKBaage1O2EZQU,3074
 openai_sdk_helpers/streamlit_app/__init__.py,sha256=DIXClgbzncsex2vnXUGjBwvykazx4-Bz089beZiq8vc,805
 openai_sdk_helpers/streamlit_app/app.py,sha256=jNkMQ4zkfojP501qk_vncyLN4TymiDXxA3cXkUvBfsw,17402
 openai_sdk_helpers/streamlit_app/config.py,sha256=t1fylt53eVmnNOfBXwpfDyG-Jji9JBUb0ZyrtUWBZ1s,16594
-openai_sdk_helpers/streamlit_app/streamlit_web_search.py,sha256=21xhBhdTsqK6ybPcLzSKSLOVeK8a3x9y_rRNvNBOAGM,2812
+openai_sdk_helpers/streamlit_app/streamlit_web_search.py,sha256=-5T22a7XbNDjQxC3pLySH85iAdlqSM2ZrR4ZIIYk_KA,2808
 openai_sdk_helpers/structure/__init__.py,sha256=jROw0IbXYVRD2Eb3dBMsB6amQZrX8X7XSgGh_zjsZWc,3469
 openai_sdk_helpers/structure/agent_blueprint.py,sha256=VyJWkgPNzAYKRDMeR1M4kE6qqQURnwqtrrEn0TRJf0g,9698
 openai_sdk_helpers/structure/base.py,sha256=7JuHxKkLR5gP0RWGQIjOQlvySfain6LrB4-zHb0oFxo,25298
@@ -73,14 +75,14 @@ openai_sdk_helpers/utils/validation.py,sha256=ZjnZNOy5AoFlszRxarNol6YZwfgw6LnwPt
 openai_sdk_helpers/utils/json/__init__.py,sha256=wBm1DBgfy_n1uSUcnCPyqBn_cCq41mijjPigSMZJZqg,2118
 openai_sdk_helpers/utils/json/base_model.py,sha256=8j__oKly46RRekmRjwFZjAxBHhZkIjMADcJReKo-QsQ,5100
 openai_sdk_helpers/utils/json/data_class.py,sha256=hffMQQTNTwybuMTOtmKNzxd6kXrVyQen67F5BE_OGqE,6469
-openai_sdk_helpers/utils/json/ref.py,sha256=jIzv3M96G1b9x2tW8JZBqKBxAsE3bUhVmMADBFH6Jb4,2806
+openai_sdk_helpers/utils/json/ref.py,sha256=FqBIRWIw33Up3rFyTlLYljcuUjg43f6Nu5wX3tOXn54,2809
 openai_sdk_helpers/utils/json/utils.py,sha256=iyc25tnObqXQJWPKLZMVts932GArdKer59KuC8aQKsY,5948
 openai_sdk_helpers/vector_storage/__init__.py,sha256=L5LxO09puh9_yBB9IDTvc1CvVkARVkHqYY1KX3inB4c,975
 openai_sdk_helpers/vector_storage/cleanup.py,sha256=ImWIE-9lli-odD8qIARvmeaa0y8ZD4pYYP-kT0O3178,3552
 openai_sdk_helpers/vector_storage/storage.py,sha256=A6zJDicObdSOVSlzhHVxEGq_tKO2_bNcsYi94xsKDNI,23655
 openai_sdk_helpers/vector_storage/types.py,sha256=jTCcOYMeOpZWvcse0z4T3MVs-RBOPC-fqWTBeQrgafU,1639
-openai_sdk_helpers-0.4.1.dist-info/METADATA,sha256=AHH6lc1N3igZSpnjxss0wQpxz8EjP-qLY7IBWRRg93g,23557
-openai_sdk_helpers-0.4.1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-openai_sdk_helpers-0.4.1.dist-info/entry_points.txt,sha256=gEOD1ZeXe8d2OP-KzUlG-b_9D9yUZTCt-GFW3EDbIIY,63
-openai_sdk_helpers-0.4.1.dist-info/licenses/LICENSE,sha256=CUhc1NrE50bs45tcXF7OcTQBKEvkUuLqeOHgrWQ5jaA,1067
-openai_sdk_helpers-0.4.1.dist-info/RECORD,,
+openai_sdk_helpers-0.4.2.dist-info/METADATA,sha256=h1-_VwnRxkgrCwRJMEwpqMe3TCeUuMQ0U-PwYoJrJkU,23557
+openai_sdk_helpers-0.4.2.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+openai_sdk_helpers-0.4.2.dist-info/entry_points.txt,sha256=gEOD1ZeXe8d2OP-KzUlG-b_9D9yUZTCt-GFW3EDbIIY,63
+openai_sdk_helpers-0.4.2.dist-info/licenses/LICENSE,sha256=CUhc1NrE50bs45tcXF7OcTQBKEvkUuLqeOHgrWQ5jaA,1067
+openai_sdk_helpers-0.4.2.dist-info/RECORD,,