openai-sdk-helpers 0.4.3__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. openai_sdk_helpers/__init__.py +41 -7
  2. openai_sdk_helpers/agent/__init__.py +1 -2
  3. openai_sdk_helpers/agent/base.py +89 -173
  4. openai_sdk_helpers/agent/configuration.py +12 -20
  5. openai_sdk_helpers/agent/coordinator.py +14 -17
  6. openai_sdk_helpers/agent/runner.py +3 -45
  7. openai_sdk_helpers/agent/search/base.py +49 -71
  8. openai_sdk_helpers/agent/search/vector.py +82 -110
  9. openai_sdk_helpers/agent/search/web.py +103 -81
  10. openai_sdk_helpers/agent/summarizer.py +20 -28
  11. openai_sdk_helpers/agent/translator.py +17 -23
  12. openai_sdk_helpers/agent/validator.py +17 -23
  13. openai_sdk_helpers/errors.py +9 -0
  14. openai_sdk_helpers/extract/__init__.py +23 -0
  15. openai_sdk_helpers/extract/extractor.py +157 -0
  16. openai_sdk_helpers/extract/generator.py +476 -0
  17. openai_sdk_helpers/prompt/extractor_config_agent_instructions.jinja +6 -0
  18. openai_sdk_helpers/prompt/extractor_config_generator.jinja +37 -0
  19. openai_sdk_helpers/prompt/extractor_config_generator_instructions.jinja +9 -0
  20. openai_sdk_helpers/prompt/extractor_prompt_optimizer_agent_instructions.jinja +4 -0
  21. openai_sdk_helpers/prompt/extractor_prompt_optimizer_request.jinja +11 -0
  22. openai_sdk_helpers/response/__init__.py +2 -6
  23. openai_sdk_helpers/response/base.py +85 -94
  24. openai_sdk_helpers/response/configuration.py +39 -14
  25. openai_sdk_helpers/response/files.py +2 -0
  26. openai_sdk_helpers/response/runner.py +1 -48
  27. openai_sdk_helpers/response/tool_call.py +0 -141
  28. openai_sdk_helpers/response/vector_store.py +8 -5
  29. openai_sdk_helpers/streamlit_app/app.py +1 -1
  30. openai_sdk_helpers/structure/__init__.py +16 -0
  31. openai_sdk_helpers/structure/base.py +239 -278
  32. openai_sdk_helpers/structure/extraction.py +1228 -0
  33. openai_sdk_helpers/structure/plan/plan.py +0 -20
  34. openai_sdk_helpers/structure/plan/task.py +0 -33
  35. openai_sdk_helpers/structure/prompt.py +16 -0
  36. openai_sdk_helpers/structure/responses.py +2 -2
  37. openai_sdk_helpers/structure/web_search.py +0 -10
  38. openai_sdk_helpers/tools.py +346 -99
  39. openai_sdk_helpers/utils/__init__.py +7 -0
  40. openai_sdk_helpers/utils/json/base_model.py +315 -32
  41. openai_sdk_helpers/utils/langextract.py +194 -0
  42. {openai_sdk_helpers-0.4.3.dist-info → openai_sdk_helpers-0.5.0.dist-info}/METADATA +18 -4
  43. {openai_sdk_helpers-0.4.3.dist-info → openai_sdk_helpers-0.5.0.dist-info}/RECORD +46 -37
  44. openai_sdk_helpers/streamlit_app/streamlit_web_search.py +0 -75
  45. {openai_sdk_helpers-0.4.3.dist-info → openai_sdk_helpers-0.5.0.dist-info}/WHEEL +0 -0
  46. {openai_sdk_helpers-0.4.3.dist-info → openai_sdk_helpers-0.5.0.dist-info}/entry_points.txt +0 -0
  47. {openai_sdk_helpers-0.4.3.dist-info → openai_sdk_helpers-0.5.0.dist-info}/licenses/LICENSE +0 -0

openai_sdk_helpers/response/__init__.py
@@ -24,8 +24,6 @@ run_sync
  Execute a response workflow synchronously with resource cleanup.
  run_async
  Execute a response workflow asynchronously with resource cleanup.
- run_streamed
- Execute a response workflow and return the asynchronous result.
  attach_vector_store
  Attach vector stores to a response's file_search tool.
  process_files
@@ -38,8 +36,8 @@ from .base import ResponseBase
  from .configuration import ResponseConfiguration, ResponseRegistry, get_default_registry
  from .files import process_files
  from .messages import ResponseMessage, ResponseMessages
- from .runner import run_async, run_streamed, run_sync
- from .tool_call import ResponseToolCall, parse_tool_arguments
+ from .runner import run_async, run_sync
+ from .tool_call import ResponseToolCall
  from .vector_store import attach_vector_store

  __all__ = [
@@ -51,9 +49,7 @@ __all__ = [
  "ResponseMessages",
  "run_sync",
  "run_async",
- "run_streamed",
  "ResponseToolCall",
- "parse_tool_arguments",
  "attach_vector_store",
  "process_files",
  ]
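The dropped exports have straightforward replacements for most callers: `run_streamed` users can switch to `run_sync` or `run_async`, while `parse_tool_arguments` has no public successor visible in this diff. A minimal migration sketch, not part of the diff: `MyResponse` and `settings` are hypothetical placeholders, and the module-level `run_sync` is assumed to keep the calling convention of the removed `run_streamed` shown later in this diff.

```python
# Hypothetical migration for code that previously called run_streamed().
# MyResponse and settings stand in for a ResponseBase subclass and an
# OpenAISettings instance; the keyword layout mirrors the old run_streamed example.
from openai_sdk_helpers.response import run_sync

result = run_sync(
    MyResponse,
    content="Process this text",
    response_kwargs={"openai_settings": settings},
)
```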

openai_sdk_helpers/response/base.py
@@ -46,6 +46,7 @@ from .messages import ResponseMessage, ResponseMessages
  from ..settings import OpenAISettings
  from ..structure import StructureBase
  from ..types import OpenAIClient
+ from ..tools import ToolHandlerRegistration, ToolSpec
  from ..utils import (
  check_filepath,
  coerce_jsonable,
@@ -58,7 +59,6 @@ if TYPE_CHECKING: # pragma: no cover - only for typing hints
  from openai_sdk_helpers.streamlit_app.configuration import StreamlitAppConfig

  T = TypeVar("T", bound=StructureBase)
- ToolHandler = Callable[[ResponseFunctionToolCall], str | Any]
  RB = TypeVar("RB", bound="ResponseBase[StructureBase]")


@@ -81,8 +81,9 @@ class ResponseBase(Generic[T]):
  and naming vector stores.
  instructions : str
  System instructions provided to the OpenAI API for context.
- tools : list or None
- Tool definitions for the OpenAI API request. Pass None for no tools.
+ tools : list[ToolHandlerRegistration] or None
+ Tool handler registrations for the OpenAI API request. Pass None for
+ no tools.
  output_structure : type[StructureBase] or None
  Structure class used to parse tool call outputs. When provided,
  the schema is automatically generated using the structure's
@@ -93,8 +94,9 @@ class ResponseBase(Generic[T]):
  Optional absolute directory path for storing artifacts. If not provided,
  defaults to get_data_path(class_name). Session files are saved as
  data_path / uuid.json.
- tool_handlers : dict[str, ToolHandler] or None, default None
- Mapping from tool names to callable handlers. Each handler receives
+ tool_handlers : dict[str, ToolHandlerRegistration] or None, default None
+ Mapping from tool names to handler registrations that include optional
+ ToolSpec metadata to parse tool outputs by name. Each handler receives
  a ResponseFunctionToolCall and returns a string or any serializable
  result. Defaults to an empty dict when not provided.
  openai_settings : OpenAISettings or None, default None
@@ -119,12 +121,12 @@ class ResponseBase(Generic[T]):

  Methods
  -------
- run_async(content, attachments=None)
+ run_async(content, files=None, use_vector_store=False)
  Generate a response asynchronously and return parsed output.
- run_sync(content, attachments=None)
+ run_sync(content, files=None, use_vector_store=False)
  Execute run_async synchronously with thread management.
- run_streamed(content, attachments=None)
- Execute run_async and await the result (streaming not yet supported).
+ register_tool(func, tool_spec)
+ Register a tool handler and definition from a ToolSpec.
  get_last_tool_message()
  Return the most recent tool message or None.
  get_last_user_message()
@@ -158,11 +160,11 @@ class ResponseBase(Generic[T]):
  *,
  name: str,
  instructions: str,
- tools: list | None,
+ tools: list[ToolHandlerRegistration] | None,
  output_structure: type[T] | None,
  system_vector_store: list[str] | None = None,
  data_path: Path | str | None = None,
- tool_handlers: dict[str, ToolHandler] | None = None,
+ tool_handlers: dict[str, ToolHandlerRegistration] | None = None,
  openai_settings: OpenAISettings | None = None,
  ) -> None:
  """Initialize a response session with OpenAI configuration.
@@ -178,8 +180,9 @@ class ResponseBase(Generic[T]):
  and naming vector stores.
  instructions : str
  System instructions provided to the OpenAI API for context.
- tools : list or None
- Tool definitions for the OpenAI API request. Pass None for no tools.
+ tools : list[ToolHandlerRegistration] or None
+ Tool handler registrations for the OpenAI API request. Pass None for
+ no tools.
  output_structure : type[StructureBase] or None
  Structure class used to parse tool call outputs. When provided,
  the schema is automatically generated using the structure's
@@ -190,8 +193,9 @@ class ResponseBase(Generic[T]):
  Optional absolute directory path for storing artifacts. If not provided,
  defaults to get_data_path(class_name). Session files are saved as
  data_path / uuid.json.
- tool_handlers : dict[str, ToolHandler] or None, default None
- Mapping from tool names to callable handlers. Each handler receives
+ tool_handlers : dict[str, ToolHandlerRegistration] or None, default None
+ Mapping from tool names to handler registrations that include optional
+ ToolSpec metadata to parse tool outputs by name. Each handler receives
  a ResponseFunctionToolCall and returns a string or any serializable
  result. Defaults to an empty dict when not provided.
  openai_settings : OpenAISettings or None, default None
@@ -222,9 +226,7 @@ class ResponseBase(Generic[T]):
  if openai_settings is None:
  raise ValueError("openai_settings is required")

- if tool_handlers is None:
- tool_handlers = {}
- self._tool_handlers = tool_handlers
+ self._tool_handlers = tool_handlers or {}
  self.uuid = uuid.uuid4()
  self._name = name

@@ -242,8 +244,14 @@ class ResponseBase(Generic[T]):
  self._data_path = get_data_path(self.__class__.__name__)

  self._instructions = instructions
- self._tools = tools if tools is not None else []
+ self._tools: list[dict[str, Any]] | None = None
+ if tools is not None:
+ self._tools = [
+ tool_handler.tool_spec.as_tool_definition() for tool_handler in tools
+ ]
+
  self._output_structure = output_structure
+ self._system_vector_store = system_vector_store
  self._openai_settings = openai_settings

  if not self._openai_settings.api_key:
@@ -339,7 +347,7 @@ class ResponseBase(Generic[T]):
  return self._instructions

  @property
- def tools(self) -> list | None:
+ def tools(self) -> list[dict[str, Any]] | None:
  """Return the tool definitions for this response.

  Returns
@@ -370,6 +378,43 @@ class ResponseBase(Generic[T]):
  """
  return self._output_structure

+ def register_tool(
+ self,
+ func: Callable[..., Any],
+ *,
+ tool_spec: ToolSpec,
+ ) -> None:
+ """Register a tool handler and definition using a ToolSpec.
+
+ Parameters
+ ----------
+ func : Callable[..., Any]
+ Tool implementation function to wrap for argument parsing and
+ result serialization.
+ tool_spec : ToolSpec
+ Tool specification describing input/output structures and metadata.
+
+ Returns
+ -------
+ None
+ Register the tool handler and definition for this response session.
+
+ Raises
+ ------
+ ValueError
+ If tool_spec.tool_name is empty.
+
+ Examples
+ --------
+ >>> response.register_tool(run_search, tool_spec=search_tool_spec)
+ """
+ if not tool_spec.tool_name:
+ raise ValueError("tool_spec.tool_name must be a non-empty string")
+ self._tool_handlers[tool_spec.tool_name] = ToolHandlerRegistration(
+ handler=func,
+ tool_spec=tool_spec,
+ )
+
  def _build_input(
  self,
  content: str | list[str],
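Together these hunks replace the bare `ToolHandler` callables with `ToolHandlerRegistration` objects and add `register_tool` for late registration. A short usage sketch, not part of the diff: `response` and `search_tool_spec` are assumed to exist already, and only the `register_tool(func, tool_spec=...)` signature and the handler contract (receives the tool call, returns a serializable result) come from the code above.

```python
# Hypothetical handler for the tool named by search_tool_spec.tool_name.
def run_search(tool_call):
    # Inspect tool_call (a ResponseFunctionToolCall) and return any
    # JSON-serializable result; async handlers are awaited automatically.
    return {"results": []}

# Registers the handler and stores the ToolSpec used to parse its output.
response.register_tool(run_search, tool_spec=search_tool_spec)
```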
@@ -455,7 +500,7 @@ class ResponseBase(Generic[T]):
  content: str | list[str],
  files: str | list[str] | None = None,
  use_vector_store: bool = False,
- ) -> T | None:
+ ) -> T | str:
  """Generate a response asynchronously from the OpenAI API.

  Builds input messages, sends the request to OpenAI, processes any
@@ -480,8 +525,8 @@ class ResponseBase(Generic[T]):

  Returns
  -------
- T or None
- Parsed response object of type output_structure, or None if
+ T or str
+ Parsed response object of type output_structure, or raw string if
  no structured output was produced.

  Raises
@@ -540,15 +585,18 @@ class ResponseBase(Generic[T]):
  )

  tool_name = response_output.name
- handler = self._tool_handlers.get(tool_name)
+ registration = self._tool_handlers.get(tool_name)

- if handler is None:
+ if registration is None:
  log(
  f"No handler found for tool '{tool_name}'",
  level=logging.ERROR,
  )
  raise ValueError(f"No handler for tool: {tool_name}")

+ handler = registration.handler
+ tool_spec = registration.tool_spec
+
  try:
  if inspect.iscoroutinefunction(handler):
  tool_result_json = await handler(response_output)
@@ -571,9 +619,11 @@ class ResponseBase(Generic[T]):
  )
  raise RuntimeError(f"Error in tool handler '{tool_name}': {exc}")

- if self._output_structure:
- output_dict = self._output_structure.from_raw_input(tool_result)
- output_dict.console_print()
+ if tool_spec is not None:
+ output_dict = tool_spec.output_structure.from_json(tool_result)
+ parsed_result = cast(T, output_dict)
+ elif self._output_structure:
+ output_dict = self._output_structure.from_json(tool_result)
  parsed_result = output_dict
  else:
  print(tool_result)
@@ -588,13 +638,13 @@ class ResponseBase(Generic[T]):
  try:
  output_dict = json.loads(raw_text)
  if self._output_structure:
- return self._output_structure.from_raw_input(output_dict)
+ return self._output_structure.from_json(output_dict)
  return output_dict
  except Exception:
  print(raw_text)
  if parsed_result is not None:
  return parsed_result
- return None
+ return response.output_text

  def run_sync(
  self,
@@ -602,7 +652,7 @@ class ResponseBase(Generic[T]):
  *,
  files: str | list[str] | None = None,
  use_vector_store: bool = False,
- ) -> T | None:
+ ) -> T | str:
  """Execute run_async synchronously with proper event loop handling.

  Automatically detects if an event loop is already running and uses
@@ -627,8 +677,8 @@ class ResponseBase(Generic[T]):

  Returns
  -------
- T or None
- Parsed response object of type output_structure, or None.
+ T or str
+ Parsed response object of type output_structure, or raw string.

  Raises
  ------
@@ -654,7 +704,7 @@ class ResponseBase(Generic[T]):
  ... )
  """

- async def runner() -> T | None:
+ async def runner() -> T | str:
  return await self.run_async(
  content=content,
  files=files,
@@ -665,7 +715,7 @@ class ResponseBase(Generic[T]):
  asyncio.get_running_loop()
  except RuntimeError:
  return asyncio.run(runner())
- result: T | None = None
+ result: T | str = ""

  def _thread_func() -> None:
  nonlocal result
@@ -676,65 +726,6 @@ class ResponseBase(Generic[T]):
  thread.join()
  return result

- def run_streamed(
- self,
- content: str | list[str],
- *,
- files: str | list[str] | None = None,
- use_vector_store: bool = False,
- ) -> T | None:
- """Execute run_async and await the result.
-
- Streaming responses are not yet fully supported, so this method
- simply awaits run_async to provide API compatibility with agent
- interfaces.
-
- Automatically detects file types:
- - Images are sent as base64-encoded images
- - Documents are sent as base64-encoded files (default)
- - Documents can optionally use vector stores for RAG
-
- Parameters
- ----------
- content : str or list[str]
- Prompt text or list of prompt texts to send.
- files : str, list[str], or None, default None
- Optional file path or list of file paths. Each file is
- automatically processed based on its type.
- use_vector_store : bool, default False
- If True, non-image files are uploaded to a vector store
- for RAG-enabled search instead of inline base64 encoding.
-
- Returns
- -------
- T or None
- Parsed response object of type output_structure, or None.
-
- Raises
- ------
- RuntimeError
- If the API returns no output.
- If a tool handler raises an exception.
- ValueError
- If the API invokes a tool with no registered handler.
-
- Notes
- -----
- This method exists for API consistency but does not currently
- provide true streaming functionality.
-
- Examples
- --------
- >>> result = response.run_streamed("Analyze these files")
- """
- return asyncio.run(
- self.run_async(
- content=content,
- files=files,
- use_vector_store=use_vector_store,
- )
- )
-
  def get_last_tool_message(self) -> ResponseMessage | None:
  """Return the most recent tool message from conversation history.

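Since `run_async` and `run_sync` now fall back to `response.output_text` instead of returning None, callers that previously checked for None should branch on the string case. A brief sketch, not part of the diff: `response` is a placeholder for an existing ResponseBase session and the file path is illustrative.

```python
# Hypothetical call site updated for the widened T | str return type in 0.5.0.
result = response.run_sync("Analyze these files", files=["report.pdf"])

if isinstance(result, str):
    # Nothing was parsed; this is the raw response.output_text fallback.
    print(result)
else:
    # Structured output parsed via the registered ToolSpec or output_structure.
    structured = result
```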

openai_sdk_helpers/response/configuration.py
@@ -8,7 +8,8 @@ from typing import Generic, Optional, Sequence, Type, TypeVar

  from ..settings import OpenAISettings
  from ..structure.base import StructureBase
- from .base import ResponseBase, ToolHandler
+ from .base import ResponseBase
+ from ..tools import ToolHandlerRegistration
  from ..utils.json.data_class import DataclassJSONSerializable
  from ..utils.registry import RegistryBase
  from ..utils.instructions import resolve_instructions_from_path
@@ -23,6 +24,21 @@ class ResponseRegistry(RegistryBase["ResponseConfiguration"]):
  Inherits from RegistryBase to provide centralized storage and retrieval
  of response configurations, enabling reusable response specs across the application.

+ Methods
+ -------
+ register(configuration)
+ Add a configuration to the registry.
+ get(name)
+ Retrieve a configuration by name.
+ list_names()
+ Return all registered configuration names.
+ clear()
+ Remove all registered configurations.
+ save_to_directory(path)
+ Export all registered configurations to JSON files.
+ load_from_directory(path, config_class)
+ Load configurations from JSON files in a directory.
+
  Examples
  --------
  >>> registry = ResponseRegistry()
@@ -61,12 +77,11 @@ def get_default_registry() -> ResponseRegistry:

  @dataclass(frozen=True, slots=True)
  class ResponseConfiguration(DataclassJSONSerializable, Generic[TIn, TOut]):
- """
- Represent an immutable configuration describing input and output structures.
+ """Represent an immutable configuration describing input and output structures.

  Encapsulate all metadata required to define how a request is interpreted and
  how a response is structured, while enforcing strict type and runtime safety.
- Inherits from DataclassJSONSerializable to support serialization to JSON format.
+ Inherit from DataclassJSONSerializable to support serialization to JSON format.

  Parameters
  ----------
@@ -87,9 +102,11 @@ class ResponseConfiguration(DataclassJSONSerializable, Generic[TIn, TOut]):
  system_vector_store : list[str], optional
  Optional list of vector store names to attach as system context.
  Default is None.
- data_path : Path, str, or None, optional
- Optional absolute directory path for storing artifacts. If not provided,
- defaults to get_data_path(class_name). Default is None.
+ add_output_instructions : bool, optional
+ Whether to append output structure instructions to the prompt.
+ Default is False.
+ add_web_search_tool : bool, optional
+ Whether to append a web_search tool to the tool list. Default is False.

  Raises
  ------
@@ -108,6 +125,12 @@ class ResponseConfiguration(DataclassJSONSerializable, Generic[TIn, TOut]):
  -------
  __post_init__()
  Validate configuration invariants and enforce StructureBase subclassing.
+ get_resolved_instructions()
+ Return instructions with optional output structure guidance appended.
+ get_resolved_tools()
+ Return tools list with optional web_search tool appended.
+ gen_response(openai_settings, data_path=None, tool_handlers=None)
+ Build a ResponseBase instance from this configuration.
  to_json()
  Return a JSON-compatible dict representation (inherited from JSONSerializable).
  to_json_file(filepath)
@@ -119,7 +142,7 @@ class ResponseConfiguration(DataclassJSONSerializable, Generic[TIn, TOut]):

  Examples
  --------
- >>> configuration = Configuration(
+ >>> configuration = ResponseConfiguration(
  ... name="targeting_to_plan",
  ... tools=None,
  ... input_structure=PromptStructure,
@@ -139,8 +162,7 @@ class ResponseConfiguration(DataclassJSONSerializable, Generic[TIn, TOut]):
  add_web_search_tool: bool = False

  def __post_init__(self) -> None:
- """
- Validate configuration invariants after initialization.
+ """Validate configuration invariants after initialization.

  Enforce non-empty naming, correct typing of structures, and ensure that
  any declared structure subclasses StructureBase.
@@ -222,7 +244,7 @@ class ResponseConfiguration(DataclassJSONSerializable, Generic[TIn, TOut]):
  *,
  openai_settings: OpenAISettings,
  data_path: Optional[Path] = None,
- tool_handlers: dict[str, ToolHandler] | None = None,
+ tool_handlers: dict[str, ToolHandlerRegistration] | None = None,
  ) -> ResponseBase[TOut]:
  """Generate a ResponseBase instance based on the configuration.

@@ -230,9 +252,12 @@ class ResponseConfiguration(DataclassJSONSerializable, Generic[TIn, TOut]):
  ----------
  openai_settings : OpenAISettings
  Authentication and model settings applied to the generated
- :class:`ResponseBase`.
- tool_handlers : dict[str, Callable], optional
- Mapping of tool names to handler callables. Defaults to an empty
+ ResponseBase.
+ data_path : Path or None, default None
+ Optional override for the response artifact directory.
+ tool_handlers : dict[str, ToolHandlerRegistration], optional
+ Mapping of tool names to handler registrations. Registrations can include
+ ToolSpec metadata to parse tool outputs by name. Defaults to an empty
  dictionary when not provided.

  Returns
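A short end-to-end sketch of the updated configuration API, not part of the diff: `settings` and `my_registration` are hypothetical placeholders, the registered name is borrowed from the docstring example, and only `get_default_registry()`, `ResponseRegistry.get(name)`, and the keyword-only `gen_response` signature come from the code above.

```python
# Hypothetical wiring of a registered configuration into a runnable session.
from openai_sdk_helpers.response import get_default_registry

configuration = get_default_registry().get("targeting_to_plan")
response = configuration.gen_response(
    openai_settings=settings,                       # placeholder OpenAISettings
    tool_handlers={"run_search": my_registration},  # placeholder ToolHandlerRegistration
)
result = response.run_sync("Draft a plan")
```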

openai_sdk_helpers/response/files.py
@@ -151,6 +151,8 @@ def _upload_to_vector_store(
  model=response._model,
  )
  user_vector_storage = cast(Any, response._user_vector_storage)
+ if response._tools is None:
+ response._tools = []
  if not any(tool.get("type") == "file_search" for tool in response._tools):
  response._tools.append(
  {
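The added guard follows from the ResponseBase change above: `_tools` now starts as None when no registrations are passed, so helpers that append a file_search entry must materialize the list first. A standalone illustration, not part of the diff; the appended dict is truncated in the hunk, so the entry below is only a placeholder for what `_upload_to_vector_store` actually builds.

```python
# Hypothetical illustration of the lazy-initialization pattern added here.
tools: list[dict] | None = None  # mirrors ResponseBase._tools with tools=None

if tools is None:
    tools = []
if not any(tool.get("type") == "file_search" for tool in tools):
    tools.append({"type": "file_search"})  # placeholder entry only

print(tools)
```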

openai_sdk_helpers/response/runner.py
@@ -7,12 +7,10 @@ They simplify common usage patterns for both synchronous and asynchronous contex

  from __future__ import annotations

- import asyncio
  from typing import Any, TypeVar
  from .base import ResponseBase

-
  R = TypeVar("R", bound=ResponseBase[Any])



@@ -100,49 +98,4 @@ async def run_async(
  response.close()


- def run_streamed(
- response_cls: type[R],
- *,
- content: str,
- response_kwargs: dict[str, Any] | None = None,
- ) -> Any:
- """Execute a response workflow and return the awaited result.
-
- Provides API compatibility with agent interfaces. Streaming responses
- are not currently fully supported, so this executes run_async and
- awaits the result.
-
- Parameters
- ----------
- response_cls : type[ResponseBase]
- Response class to instantiate for the workflow.
- content : str
- Prompt text to send to the OpenAI API.
- response_kwargs : dict[str, Any] or None, default None
- Optional keyword arguments forwarded to response_cls constructor.
-
- Returns
- -------
- Any
- Parsed response from run_async, typically a structured output or None.
-
- Notes
- -----
- This function exists for API consistency but does not currently provide
- true streaming functionality.
-
- Examples
- --------
- >>> from openai_sdk_helpers.response import run_streamed
- >>> result = run_streamed(
- ... MyResponse,
- ... content="Process this text",
- ... response_kwargs={"openai_settings": settings}
- ... )
- """
- return asyncio.run(
- run_async(response_cls, content=content, response_kwargs=response_kwargs)
- )
-
-
- __all__ = ["run_sync", "run_async", "run_streamed"]
+ __all__ = ["run_sync", "run_async"]
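For callers of the removed module-level `run_streamed`, the remaining runners cover the same ground and still clean up the response session afterwards. A hedged async sketch, not part of the diff, reusing the `MyResponse` and `settings` placeholders from the removed docstring example; the keyword call to `run_async` matches the removed function's own body.

```python
# Hypothetical async replacement for the removed run_streamed() helper.
import asyncio

from openai_sdk_helpers.response import run_async


async def main() -> None:
    result = await run_async(
        MyResponse,                                     # placeholder ResponseBase subclass
        content="Process this text",
        response_kwargs={"openai_settings": settings},  # placeholder OpenAISettings
    )
    print(result)


asyncio.run(main())  # run_streamed() did exactly this internally
```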